From 266f9dbe16605592e74c8ee7e159f9a2aaeee0ce Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 1 Mar 2020 11:20:42 +0100 Subject: [PATCH 1/6] Remove vendor dir --- vendor/bazil.org/fuse/.gitattributes | 2 - vendor/bazil.org/fuse/.gitignore | 11 - vendor/bazil.org/fuse/LICENSE | 93 - vendor/bazil.org/fuse/README.md | 23 - vendor/bazil.org/fuse/buffer.go | 35 - vendor/bazil.org/fuse/debug.go | 21 - vendor/bazil.org/fuse/error_darwin.go | 17 - vendor/bazil.org/fuse/error_freebsd.go | 15 - vendor/bazil.org/fuse/error_linux.go | 17 - vendor/bazil.org/fuse/error_std.go | 31 - vendor/bazil.org/fuse/fs/serve.go | 1568 --- vendor/bazil.org/fuse/fs/tree.go | 99 - vendor/bazil.org/fuse/fuse.go | 2304 --- vendor/bazil.org/fuse/fuse_darwin.go | 9 - vendor/bazil.org/fuse/fuse_freebsd.go | 6 - vendor/bazil.org/fuse/fuse_kernel.go | 774 -- vendor/bazil.org/fuse/fuse_kernel_darwin.go | 88 - vendor/bazil.org/fuse/fuse_kernel_freebsd.go | 62 - vendor/bazil.org/fuse/fuse_kernel_linux.go | 70 - vendor/bazil.org/fuse/fuse_kernel_std.go | 1 - vendor/bazil.org/fuse/fuse_linux.go | 7 - vendor/bazil.org/fuse/fuseutil/fuseutil.go | 20 - vendor/bazil.org/fuse/mount.go | 38 - vendor/bazil.org/fuse/mount_darwin.go | 208 - vendor/bazil.org/fuse/mount_freebsd.go | 111 - vendor/bazil.org/fuse/mount_linux.go | 150 - vendor/bazil.org/fuse/options.go | 310 - vendor/bazil.org/fuse/options_darwin.go | 35 - vendor/bazil.org/fuse/options_freebsd.go | 28 - vendor/bazil.org/fuse/options_linux.go | 25 - vendor/bazil.org/fuse/protocol.go | 75 - vendor/bazil.org/fuse/unmount.go | 6 - vendor/bazil.org/fuse/unmount_linux.go | 21 - vendor/bazil.org/fuse/unmount_std.go | 17 - vendor/cloud.google.com/go/AUTHORS | 15 - vendor/cloud.google.com/go/CONTRIBUTORS | 40 - vendor/cloud.google.com/go/LICENSE | 202 - .../go/compute/metadata/metadata.go | 513 - .../github.com/Azure/azure-sdk-for-go/LICENSE | 202 - .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 - .../Azure/azure-sdk-for-go/storage/README.md | 22 - .../azure-sdk-for-go/storage/appendblob.go | 91 - .../azure-sdk-for-go/storage/authorization.go | 246 - .../Azure/azure-sdk-for-go/storage/blob.go | 632 - .../azure-sdk-for-go/storage/blobsasuri.go | 174 - .../storage/blobserviceclient.go | 186 - .../azure-sdk-for-go/storage/blockblob.go | 270 - .../Azure/azure-sdk-for-go/storage/client.go | 988 -- .../azure-sdk-for-go/storage/commonsasuri.go | 38 - .../azure-sdk-for-go/storage/container.go | 640 - .../azure-sdk-for-go/storage/copyblob.go | 237 - .../azure-sdk-for-go/storage/directory.go | 238 - .../Azure/azure-sdk-for-go/storage/entity.go | 456 - .../Azure/azure-sdk-for-go/storage/file.go | 480 - .../storage/fileserviceclient.go | 338 - .../azure-sdk-for-go/storage/leaseblob.go | 201 - .../Azure/azure-sdk-for-go/storage/message.go | 171 - .../Azure/azure-sdk-for-go/storage/odata.go | 47 - .../azure-sdk-for-go/storage/pageblob.go | 203 - .../Azure/azure-sdk-for-go/storage/queue.go | 436 - .../azure-sdk-for-go/storage/queuesasuri.go | 146 - .../storage/queueserviceclient.go | 42 - .../Azure/azure-sdk-for-go/storage/share.go | 216 - .../azure-sdk-for-go/storage/storagepolicy.go | 61 - .../storage/storageservice.go | 131 - .../Azure/azure-sdk-for-go/storage/table.go | 419 - .../azure-sdk-for-go/storage/table_batch.go | 328 - .../storage/tableserviceclient.go | 204 - .../Azure/azure-sdk-for-go/storage/util.go | 244 - .../Azure/azure-sdk-for-go/version/version.go | 21 - .../Azure/go-autorest/autorest/LICENSE | 191 - .../Azure/go-autorest/autorest/adal/LICENSE | 191 - 
.../Azure/go-autorest/autorest/adal/README.md | 292 - .../Azure/go-autorest/autorest/adal/config.go | 151 - .../go-autorest/autorest/adal/devicetoken.go | 242 - .../Azure/go-autorest/autorest/adal/go.mod | 11 - .../Azure/go-autorest/autorest/adal/go.sum | 12 - .../go-autorest/autorest/adal/persist.go | 73 - .../Azure/go-autorest/autorest/adal/sender.go | 95 - .../Azure/go-autorest/autorest/adal/token.go | 1080 -- .../go-autorest/autorest/adal/version.go | 45 - .../go-autorest/autorest/authorization.go | 336 - .../Azure/go-autorest/autorest/autorest.go | 150 - .../Azure/go-autorest/autorest/azure/async.go | 924 -- .../Azure/go-autorest/autorest/azure/azure.go | 326 - .../autorest/azure/environments.go | 244 - .../autorest/azure/metadata_environment.go | 245 - .../Azure/go-autorest/autorest/azure/rp.go | 200 - .../Azure/go-autorest/autorest/client.go | 300 - .../Azure/go-autorest/autorest/date/LICENSE | 191 - .../Azure/go-autorest/autorest/date/date.go | 96 - .../Azure/go-autorest/autorest/date/go.mod | 3 - .../Azure/go-autorest/autorest/date/time.go | 103 - .../go-autorest/autorest/date/timerfc1123.go | 100 - .../go-autorest/autorest/date/unixtime.go | 123 - .../go-autorest/autorest/date/utility.go | 25 - .../Azure/go-autorest/autorest/error.go | 98 - .../Azure/go-autorest/autorest/go.mod | 11 - .../Azure/go-autorest/autorest/go.sum | 18 - .../Azure/go-autorest/autorest/preparer.go | 550 - .../Azure/go-autorest/autorest/responder.go | 269 - .../go-autorest/autorest/retriablerequest.go | 52 - .../autorest/retriablerequest_1.7.go | 54 - .../autorest/retriablerequest_1.8.go | 66 - .../Azure/go-autorest/autorest/sender.go | 407 - .../Azure/go-autorest/autorest/utility.go | 228 - .../Azure/go-autorest/autorest/version.go | 41 - .../Azure/go-autorest/logger/LICENSE | 191 - .../Azure/go-autorest/logger/go.mod | 3 - .../Azure/go-autorest/logger/logger.go | 328 - .../Azure/go-autorest/tracing/LICENSE | 191 - .../Azure/go-autorest/tracing/go.mod | 3 - .../Azure/go-autorest/tracing/tracing.go | 67 - vendor/github.com/cenkalti/backoff/.gitignore | 22 - .../github.com/cenkalti/backoff/.travis.yml | 10 - vendor/github.com/cenkalti/backoff/LICENSE | 20 - vendor/github.com/cenkalti/backoff/README.md | 30 - vendor/github.com/cenkalti/backoff/backoff.go | 66 - vendor/github.com/cenkalti/backoff/context.go | 63 - .../cenkalti/backoff/exponential.go | 153 - vendor/github.com/cenkalti/backoff/retry.go | 82 - vendor/github.com/cenkalti/backoff/ticker.go | 82 - vendor/github.com/cenkalti/backoff/tries.go | 35 - vendor/github.com/cespare/xxhash/LICENSE.txt | 22 - vendor/github.com/cespare/xxhash/README.md | 50 - vendor/github.com/cespare/xxhash/go.mod | 6 - vendor/github.com/cespare/xxhash/go.sum | 4 - vendor/github.com/cespare/xxhash/rotate.go | 14 - vendor/github.com/cespare/xxhash/rotate19.go | 14 - vendor/github.com/cespare/xxhash/xxhash.go | 168 - .../github.com/cespare/xxhash/xxhash_amd64.go | 12 - .../github.com/cespare/xxhash/xxhash_amd64.s | 233 - .../github.com/cespare/xxhash/xxhash_other.go | 75 - .../github.com/cespare/xxhash/xxhash_safe.go | 10 - .../cespare/xxhash/xxhash_unsafe.go | 30 - .../github.com/cpuguy83/go-md2man/LICENSE.md | 21 - .../cpuguy83/go-md2man/md2man/md2man.go | 20 - .../cpuguy83/go-md2man/md2man/roff.go | 285 - vendor/github.com/dgrijalva/jwt-go/.gitignore | 4 - .../github.com/dgrijalva/jwt-go/.travis.yml | 13 - vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 - .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - vendor/github.com/dgrijalva/jwt-go/README.md | 100 - 
.../dgrijalva/jwt-go/VERSION_HISTORY.md | 118 - vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - vendor/github.com/dgrijalva/jwt-go/doc.go | 4 - vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 148 - .../dgrijalva/jwt-go/ecdsa_utils.go | 67 - vendor/github.com/dgrijalva/jwt-go/errors.go | 59 - vendor/github.com/dgrijalva/jwt-go/hmac.go | 95 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - vendor/github.com/dgrijalva/jwt-go/none.go | 52 - vendor/github.com/dgrijalva/jwt-go/parser.go | 148 - vendor/github.com/dgrijalva/jwt-go/rsa.go | 101 - vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 - .../github.com/dgrijalva/jwt-go/rsa_utils.go | 101 - .../dgrijalva/jwt-go/signing_method.go | 35 - vendor/github.com/dgrijalva/jwt-go/token.go | 108 - .../elithrar/simple-scrypt/.gitignore | 26 - .../elithrar/simple-scrypt/.travis.yml | 18 - .../github.com/elithrar/simple-scrypt/LICENSE | 22 - .../elithrar/simple-scrypt/README.md | 155 - .../elithrar/simple-scrypt/scrypt.go | 309 - vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/deprecated.go | 63 - .../golang/protobuf/proto/discard.go | 350 - .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 607 - .../github.com/golang/protobuf/proto/lib.go | 965 -- .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 545 - .../golang/protobuf/proto/table_marshal.go | 2776 ---- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 --- .../github.com/golang/protobuf/proto/text.go | 843 -- .../golang/protobuf/proto/text_parser.go | 880 -- .../github.com/golang/protobuf/ptypes/any.go | 141 - .../golang/protobuf/ptypes/any/any.pb.go | 200 - .../golang/protobuf/ptypes/any/any.proto | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 35 - .../golang/protobuf/ptypes/duration.go | 102 - .../protobuf/ptypes/duration/duration.pb.go | 161 - .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/timestamp.go | 132 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 179 - .../protobuf/ptypes/timestamp/timestamp.proto | 135 - vendor/github.com/google/go-cmp/LICENSE | 27 - .../google/go-cmp/cmp/cmpopts/equate.go | 89 - .../google/go-cmp/cmp/cmpopts/ignore.go | 145 - .../google/go-cmp/cmp/cmpopts/sort.go | 146 - .../google/go-cmp/cmp/cmpopts/sort_go17.go | 46 - .../google/go-cmp/cmp/cmpopts/sort_go18.go | 31 - .../go-cmp/cmp/cmpopts/struct_filter.go | 182 - .../github.com/google/go-cmp/cmp/compare.go | 553 - .../go-cmp/cmp/internal/diff/debug_disable.go | 17 - .../go-cmp/cmp/internal/diff/debug_enable.go | 122 - .../google/go-cmp/cmp/internal/diff/diff.go | 363 - .../go-cmp/cmp/internal/function/func.go | 49 - .../go-cmp/cmp/internal/value/format.go | 277 - .../google/go-cmp/cmp/internal/value/sort.go | 111 - .../github.com/google/go-cmp/cmp/options.go | 453 - vendor/github.com/google/go-cmp/cmp/path.go | 309 - .../github.com/google/go-cmp/cmp/reporter.go | 53 - .../google/go-cmp/cmp/unsafe_panic.go | 15 - .../google/go-cmp/cmp/unsafe_reflect.go | 23 - .../github.com/hashicorp/golang-lru/LICENSE | 362 - .../hashicorp/golang-lru/simplelru/lru.go | 161 - 
.../golang-lru/simplelru/lru_interface.go | 36 - .../inconshreveable/mousetrap/LICENSE | 13 - .../inconshreveable/mousetrap/README.md | 23 - .../inconshreveable/mousetrap/trap_others.go | 15 - .../inconshreveable/mousetrap/trap_windows.go | 98 - .../mousetrap/trap_windows_1.4.go | 46 - vendor/github.com/juju/ratelimit/LICENSE | 191 - vendor/github.com/juju/ratelimit/README.md | 117 - vendor/github.com/juju/ratelimit/ratelimit.go | 344 - vendor/github.com/juju/ratelimit/reader.go | 51 - vendor/github.com/kr/fs/LICENSE | 27 - vendor/github.com/kr/fs/Readme | 3 - vendor/github.com/kr/fs/filesystem.go | 36 - vendor/github.com/kr/fs/go.mod | 1 - vendor/github.com/kr/fs/walk.go | 95 - vendor/github.com/kurin/blazer/AUTHORS | 8 - vendor/github.com/kurin/blazer/LICENSE | 13 - vendor/github.com/kurin/blazer/b2/b2.go | 653 - vendor/github.com/kurin/blazer/b2/backend.go | 774 -- vendor/github.com/kurin/blazer/b2/baseline.go | 517 - vendor/github.com/kurin/blazer/b2/buffer.go | 185 - vendor/github.com/kurin/blazer/b2/iterator.go | 331 - vendor/github.com/kurin/blazer/b2/key.go | 156 - vendor/github.com/kurin/blazer/b2/monitor.go | 251 - vendor/github.com/kurin/blazer/b2/reader.go | 348 - vendor/github.com/kurin/blazer/b2/readerat.go | 48 - vendor/github.com/kurin/blazer/b2/writer.go | 613 - vendor/github.com/kurin/blazer/base/base.go | 1298 -- .../github.com/kurin/blazer/base/strings.go | 28 - .../blazer/internal/b2assets/b2assets.go | 237 - .../kurin/blazer/internal/b2assets/gen.go | 18 - .../kurin/blazer/internal/b2types/b2types.go | 281 - .../kurin/blazer/internal/blog/blog.go | 54 - .../kurin/blazer/x/window/window.go | 162 - vendor/github.com/marstr/guid/.travis.yml | 18 - vendor/github.com/marstr/guid/LICENSE.txt | 21 - vendor/github.com/marstr/guid/README.md | 27 - vendor/github.com/marstr/guid/guid.go | 301 - .../github.com/minio/minio-go/v6/.gitignore | 3 - .../github.com/minio/minio-go/v6/.travis.yml | 30 - .../minio/minio-go/v6/CONTRIBUTING.md | 23 - vendor/github.com/minio/minio-go/v6/LICENSE | 202 - .../minio/minio-go/v6/MAINTAINERS.md | 35 - vendor/github.com/minio/minio-go/v6/Makefile | 22 - vendor/github.com/minio/minio-go/v6/NOTICE | 2 - vendor/github.com/minio/minio-go/v6/README.md | 238 - .../minio/minio-go/v6/README_zh_CN.md | 244 - .../minio/minio-go/v6/api-compose-object.go | 565 - .../minio/minio-go/v6/api-datatypes.go | 120 - .../minio/minio-go/v6/api-error-response.go | 282 - .../minio/minio-go/v6/api-get-lifecycle.go | 77 - .../minio/minio-go/v6/api-get-object-acl.go | 136 - .../minio-go/v6/api-get-object-context.go | 26 - .../minio/minio-go/v6/api-get-object-file.go | 125 - .../minio/minio-go/v6/api-get-object.go | 661 - .../minio/minio-go/v6/api-get-options.go | 128 - .../minio/minio-go/v6/api-get-policy.go | 78 - .../github.com/minio/minio-go/v6/api-list.go | 811 -- .../minio/minio-go/v6/api-notification.go | 231 - .../minio/minio-go/v6/api-object-lock.go | 232 - .../minio/minio-go/v6/api-object-retention.go | 168 - .../minio/minio-go/v6/api-presigned.go | 215 - .../minio/minio-go/v6/api-put-bucket.go | 383 - .../minio-go/v6/api-put-object-common.go | 138 - .../minio-go/v6/api-put-object-context.go | 33 - .../minio/minio-go/v6/api-put-object-copy.go | 83 - .../v6/api-put-object-file-context.go | 64 - .../minio/minio-go/v6/api-put-object-file.go | 27 - .../minio-go/v6/api-put-object-multipart.go | 372 - .../minio-go/v6/api-put-object-streaming.go | 417 - .../minio/minio-go/v6/api-put-object.go | 290 - .../minio/minio-go/v6/api-remove.go | 332 - 
.../minio/minio-go/v6/api-s3-datatypes.go | 245 - .../minio/minio-go/v6/api-select.go | 538 - .../github.com/minio/minio-go/v6/api-stat.go | 204 - vendor/github.com/minio/minio-go/v6/api.go | 936 -- .../github.com/minio/minio-go/v6/appveyor.yml | 35 - .../minio/minio-go/v6/bucket-cache.go | 238 - .../minio/minio-go/v6/bucket-notification.go | 273 - .../github.com/minio/minio-go/v6/constants.go | 62 - vendor/github.com/minio/minio-go/v6/core.go | 201 - vendor/github.com/minio/minio-go/v6/go.mod | 14 - vendor/github.com/minio/minio-go/v6/go.sum | 41 - .../minio/minio-go/v6/hook-reader.go | 85 - .../minio-go/v6/pkg/credentials/chain.go | 89 - .../v6/pkg/credentials/config.json.sample | 17 - .../v6/pkg/credentials/credentials.go | 175 - .../v6/pkg/credentials/credentials.sample | 12 - .../minio/minio-go/v6/pkg/credentials/doc.go | 62 - .../minio-go/v6/pkg/credentials/env_aws.go | 71 - .../minio-go/v6/pkg/credentials/env_minio.go | 62 - .../pkg/credentials/file_aws_credentials.go | 120 - .../v6/pkg/credentials/file_minio_client.go | 133 - .../minio-go/v6/pkg/credentials/iam_aws.go | 250 - .../v6/pkg/credentials/signature-type.go | 77 - .../minio-go/v6/pkg/credentials/static.go | 67 - .../v6/pkg/credentials/sts_client_grants.go | 162 - .../v6/pkg/credentials/sts_ldap_identity.go | 119 - .../v6/pkg/credentials/sts_web_identity.go | 158 - .../minio-go/v6/pkg/encrypt/server-side.go | 195 - .../s3signer/request-signature-streaming.go | 306 - .../v6/pkg/s3signer/request-signature-v2.go | 316 - .../v6/pkg/s3signer/request-signature-v4.go | 315 - .../minio/minio-go/v6/pkg/s3signer/utils.go | 59 - .../minio/minio-go/v6/pkg/s3utils/utils.go | 331 - .../minio/minio-go/v6/pkg/set/stringset.go | 197 - .../minio/minio-go/v6/post-policy.go | 270 - .../minio/minio-go/v6/retry-continous.go | 69 - vendor/github.com/minio/minio-go/v6/retry.go | 157 - .../minio/minio-go/v6/s3-endpoints.go | 53 - .../github.com/minio/minio-go/v6/s3-error.go | 61 - .../minio/minio-go/v6/staticcheck.conf | 1 - .../github.com/minio/minio-go/v6/transport.go | 82 - vendor/github.com/minio/minio-go/v6/utils.go | 275 - .../github.com/minio/sha256-simd/.gitignore | 1 - .../github.com/minio/sha256-simd/.travis.yml | 25 - vendor/github.com/minio/sha256-simd/LICENSE | 202 - vendor/github.com/minio/sha256-simd/README.md | 133 - .../github.com/minio/sha256-simd/appveyor.yml | 32 - vendor/github.com/minio/sha256-simd/cpuid.go | 119 - .../github.com/minio/sha256-simd/cpuid_386.go | 24 - .../github.com/minio/sha256-simd/cpuid_386.s | 53 - .../minio/sha256-simd/cpuid_amd64.go | 24 - .../minio/sha256-simd/cpuid_amd64.s | 53 - .../github.com/minio/sha256-simd/cpuid_arm.go | 32 - .../minio/sha256-simd/cpuid_linux_arm64.go | 49 - .../minio/sha256-simd/cpuid_other.go | 34 - vendor/github.com/minio/sha256-simd/go.mod | 3 - vendor/github.com/minio/sha256-simd/sha256.go | 409 - .../sha256-simd/sha256blockAvx2_amd64.go | 22 - .../minio/sha256-simd/sha256blockAvx2_amd64.s | 1449 -- .../sha256-simd/sha256blockAvx512_amd64.asm | 686 - .../sha256-simd/sha256blockAvx512_amd64.go | 500 - .../sha256-simd/sha256blockAvx512_amd64.s | 267 - .../minio/sha256-simd/sha256blockAvx_amd64.go | 22 - .../minio/sha256-simd/sha256blockAvx_amd64.s | 408 - .../minio/sha256-simd/sha256blockSha_amd64.go | 6 - .../minio/sha256-simd/sha256blockSha_amd64.s | 266 - .../sha256-simd/sha256blockSsse_amd64.go | 22 - .../minio/sha256-simd/sha256blockSsse_amd64.s | 429 - .../minio/sha256-simd/sha256block_amd64.go | 53 - .../minio/sha256-simd/sha256block_arm64.go | 37 - 
.../minio/sha256-simd/sha256block_arm64.s | 192 - .../minio/sha256-simd/sha256block_other.go | 25 - .../minio/sha256-simd/test-architectures.sh | 15 - .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - vendor/github.com/mitchellh/go-homedir/go.mod | 1 - .../mitchellh/go-homedir/homedir.go | 167 - vendor/github.com/ncw/swift/.gitignore | 4 - vendor/github.com/ncw/swift/.travis.yml | 33 - vendor/github.com/ncw/swift/COPYING | 20 - vendor/github.com/ncw/swift/README.md | 156 - vendor/github.com/ncw/swift/auth.go | 335 - vendor/github.com/ncw/swift/auth_v3.go | 300 - .../github.com/ncw/swift/compatibility_1_0.go | 28 - .../github.com/ncw/swift/compatibility_1_1.go | 24 - .../github.com/ncw/swift/compatibility_1_6.go | 23 - .../ncw/swift/compatibility_not_1_6.go | 13 - vendor/github.com/ncw/swift/dlo.go | 136 - vendor/github.com/ncw/swift/doc.go | 19 - vendor/github.com/ncw/swift/go.mod | 1 - vendor/github.com/ncw/swift/largeobjects.go | 448 - vendor/github.com/ncw/swift/meta.go | 174 - vendor/github.com/ncw/swift/notes.txt | 55 - vendor/github.com/ncw/swift/slo.go | 171 - vendor/github.com/ncw/swift/swift.go | 2243 --- vendor/github.com/ncw/swift/timeout_reader.go | 59 - .../github.com/ncw/swift/travis_realserver.sh | 22 - .../github.com/ncw/swift/watchdog_reader.go | 55 - vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 15 - vendor/github.com/pkg/errors/LICENSE | 23 - vendor/github.com/pkg/errors/README.md | 52 - vendor/github.com/pkg/errors/appveyor.yml | 32 - vendor/github.com/pkg/errors/errors.go | 282 - vendor/github.com/pkg/errors/stack.go | 147 - vendor/github.com/pkg/profile/.travis.yml | 10 - vendor/github.com/pkg/profile/AUTHORS | 1 - vendor/github.com/pkg/profile/LICENSE | 24 - vendor/github.com/pkg/profile/README.md | 54 - vendor/github.com/pkg/profile/mutex.go | 13 - vendor/github.com/pkg/profile/mutex17.go | 9 - vendor/github.com/pkg/profile/profile.go | 263 - vendor/github.com/pkg/profile/trace.go | 8 - vendor/github.com/pkg/profile/trace16.go | 10 - vendor/github.com/pkg/sftp/.gitignore | 7 - vendor/github.com/pkg/sftp/.travis.yml | 38 - vendor/github.com/pkg/sftp/CONTRIBUTORS | 3 - vendor/github.com/pkg/sftp/LICENSE | 9 - vendor/github.com/pkg/sftp/README.md | 44 - vendor/github.com/pkg/sftp/attrs.go | 243 - vendor/github.com/pkg/sftp/attrs_stubs.go | 11 - vendor/github.com/pkg/sftp/attrs_unix.go | 17 - vendor/github.com/pkg/sftp/client.go | 1283 -- vendor/github.com/pkg/sftp/conn.go | 146 - vendor/github.com/pkg/sftp/debug.go | 9 - vendor/github.com/pkg/sftp/match.go | 295 - vendor/github.com/pkg/sftp/packet-manager.go | 203 - vendor/github.com/pkg/sftp/packet-typing.go | 134 - vendor/github.com/pkg/sftp/packet.go | 959 -- vendor/github.com/pkg/sftp/release.go | 5 - vendor/github.com/pkg/sftp/request-attrs.go | 63 - vendor/github.com/pkg/sftp/request-errors.go | 42 - vendor/github.com/pkg/sftp/request-example.go | 267 - .../github.com/pkg/sftp/request-interfaces.go | 55 - vendor/github.com/pkg/sftp/request-readme.md | 53 - vendor/github.com/pkg/sftp/request-server.go | 219 - vendor/github.com/pkg/sftp/request-unix.go | 23 - vendor/github.com/pkg/sftp/request.go | 383 - vendor/github.com/pkg/sftp/request_windows.go | 11 - vendor/github.com/pkg/sftp/server.go | 679 - .../pkg/sftp/server_statvfs_darwin.go | 21 - .../pkg/sftp/server_statvfs_impl.go | 25 - .../pkg/sftp/server_statvfs_linux.go | 22 - .../pkg/sftp/server_statvfs_stubs.go | 11 - vendor/github.com/pkg/sftp/server_stubs.go | 
32 - vendor/github.com/pkg/sftp/server_unix.go | 54 - vendor/github.com/pkg/sftp/sftp.go | 217 - vendor/github.com/pkg/xattr/.gitignore | 29 - vendor/github.com/pkg/xattr/.travis.sh | 17 - vendor/github.com/pkg/xattr/.travis.yml | 26 - vendor/github.com/pkg/xattr/LICENSE | 25 - vendor/github.com/pkg/xattr/README.md | 46 - vendor/github.com/pkg/xattr/go.mod | 3 - vendor/github.com/pkg/xattr/xattr.go | 234 - vendor/github.com/pkg/xattr/xattr_bsd.go | 197 - vendor/github.com/pkg/xattr/xattr_darwin.go | 86 - vendor/github.com/pkg/xattr/xattr_linux.go | 81 - .../github.com/pkg/xattr/xattr_unsupported.go | 60 - vendor/github.com/restic/chunker/.travis.yml | 25 - vendor/github.com/restic/chunker/LICENSE | 23 - vendor/github.com/restic/chunker/README.md | 12 - vendor/github.com/restic/chunker/chunker.go | 378 - vendor/github.com/restic/chunker/doc.go | 82 - vendor/github.com/restic/chunker/go.mod | 1 - .../github.com/restic/chunker/polynomials.go | 310 - .../russross/blackfriday/.gitignore | 8 - .../russross/blackfriday/.travis.yml | 17 - .../russross/blackfriday/LICENSE.txt | 29 - .../github.com/russross/blackfriday/README.md | 369 - .../github.com/russross/blackfriday/block.go | 1474 -- vendor/github.com/russross/blackfriday/doc.go | 32 - vendor/github.com/russross/blackfriday/go.mod | 1 - .../github.com/russross/blackfriday/html.go | 938 -- .../github.com/russross/blackfriday/inline.go | 1154 -- .../github.com/russross/blackfriday/latex.go | 334 - .../russross/blackfriday/markdown.go | 941 -- .../russross/blackfriday/smartypants.go | 430 - vendor/github.com/satori/go.uuid/.travis.yml | 23 - vendor/github.com/satori/go.uuid/LICENSE | 20 - vendor/github.com/satori/go.uuid/README.md | 65 - vendor/github.com/satori/go.uuid/codec.go | 206 - vendor/github.com/satori/go.uuid/generator.go | 239 - vendor/github.com/satori/go.uuid/sql.go | 78 - vendor/github.com/satori/go.uuid/uuid.go | 161 - vendor/github.com/spf13/cobra/.gitignore | 36 - vendor/github.com/spf13/cobra/.mailmap | 3 - vendor/github.com/spf13/cobra/.travis.yml | 21 - vendor/github.com/spf13/cobra/LICENSE.txt | 174 - vendor/github.com/spf13/cobra/README.md | 736 - vendor/github.com/spf13/cobra/args.go | 89 - .../spf13/cobra/bash_completions.go | 584 - .../spf13/cobra/bash_completions.md | 221 - vendor/github.com/spf13/cobra/cobra.go | 200 - vendor/github.com/spf13/cobra/command.go | 1517 -- .../github.com/spf13/cobra/command_notwin.go | 5 - vendor/github.com/spf13/cobra/command_win.go | 20 - vendor/github.com/spf13/cobra/doc/man_docs.go | 236 - vendor/github.com/spf13/cobra/doc/man_docs.md | 31 - vendor/github.com/spf13/cobra/doc/md_docs.go | 159 - vendor/github.com/spf13/cobra/doc/md_docs.md | 115 - .../github.com/spf13/cobra/doc/rest_docs.go | 185 - .../github.com/spf13/cobra/doc/rest_docs.md | 114 - vendor/github.com/spf13/cobra/doc/util.go | 51 - .../github.com/spf13/cobra/doc/yaml_docs.go | 169 - .../github.com/spf13/cobra/doc/yaml_docs.md | 112 - .../github.com/spf13/cobra/zsh_completions.go | 126 - vendor/github.com/spf13/pflag/.gitignore | 2 - vendor/github.com/spf13/pflag/.travis.yml | 21 - vendor/github.com/spf13/pflag/LICENSE | 28 - vendor/github.com/spf13/pflag/README.md | 296 - vendor/github.com/spf13/pflag/bool.go | 94 - vendor/github.com/spf13/pflag/bool_slice.go | 147 - vendor/github.com/spf13/pflag/bytes.go | 209 - vendor/github.com/spf13/pflag/count.go | 96 - vendor/github.com/spf13/pflag/duration.go | 86 - .../github.com/spf13/pflag/duration_slice.go | 128 - vendor/github.com/spf13/pflag/flag.go | 1227 -- 
vendor/github.com/spf13/pflag/float32.go | 88 - vendor/github.com/spf13/pflag/float64.go | 84 - vendor/github.com/spf13/pflag/golangflag.go | 105 - vendor/github.com/spf13/pflag/int.go | 84 - vendor/github.com/spf13/pflag/int16.go | 88 - vendor/github.com/spf13/pflag/int32.go | 88 - vendor/github.com/spf13/pflag/int64.go | 84 - vendor/github.com/spf13/pflag/int8.go | 88 - vendor/github.com/spf13/pflag/int_slice.go | 128 - vendor/github.com/spf13/pflag/ip.go | 94 - vendor/github.com/spf13/pflag/ip_slice.go | 148 - vendor/github.com/spf13/pflag/ipmask.go | 122 - vendor/github.com/spf13/pflag/ipnet.go | 98 - vendor/github.com/spf13/pflag/string.go | 80 - vendor/github.com/spf13/pflag/string_array.go | 103 - vendor/github.com/spf13/pflag/string_slice.go | 149 - .../github.com/spf13/pflag/string_to_int.go | 149 - .../spf13/pflag/string_to_string.go | 160 - vendor/github.com/spf13/pflag/uint.go | 88 - vendor/github.com/spf13/pflag/uint16.go | 88 - vendor/github.com/spf13/pflag/uint32.go | 88 - vendor/github.com/spf13/pflag/uint64.go | 88 - vendor/github.com/spf13/pflag/uint8.go | 88 - vendor/github.com/spf13/pflag/uint_slice.go | 126 - vendor/go.opencensus.io/.gitignore | 9 - vendor/go.opencensus.io/.travis.yml | 17 - vendor/go.opencensus.io/AUTHORS | 1 - vendor/go.opencensus.io/CONTRIBUTING.md | 63 - vendor/go.opencensus.io/Gopkg.lock | 231 - vendor/go.opencensus.io/Gopkg.toml | 36 - vendor/go.opencensus.io/LICENSE | 202 - vendor/go.opencensus.io/Makefile | 95 - vendor/go.opencensus.io/README.md | 263 - vendor/go.opencensus.io/appveyor.yml | 25 - vendor/go.opencensus.io/go.mod | 13 - vendor/go.opencensus.io/go.sum | 127 - vendor/go.opencensus.io/internal/internal.go | 37 - vendor/go.opencensus.io/internal/sanitize.go | 50 - .../internal/tagencoding/tagencoding.go | 75 - .../internal/traceinternals.go | 53 - .../go.opencensus.io/metric/metricdata/doc.go | 19 - .../metric/metricdata/exemplar.go | 33 - .../metric/metricdata/label.go | 28 - .../metric/metricdata/metric.go | 46 - .../metric/metricdata/point.go | 193 - .../metric/metricdata/type_string.go | 16 - .../metric/metricdata/unit.go | 27 - .../metric/metricproducer/manager.go | 78 - .../metric/metricproducer/producer.go | 28 - vendor/go.opencensus.io/opencensus.go | 21 - .../go.opencensus.io/plugin/ochttp/client.go | 117 - .../plugin/ochttp/client_stats.go | 143 - vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 - .../plugin/ochttp/propagation/b3/b3.go | 123 - .../go.opencensus.io/plugin/ochttp/route.go | 61 - .../go.opencensus.io/plugin/ochttp/server.go | 440 - .../ochttp/span_annotating_client_trace.go | 169 - .../go.opencensus.io/plugin/ochttp/stats.go | 292 - .../go.opencensus.io/plugin/ochttp/trace.go | 239 - .../plugin/ochttp/wrapped_body.go | 44 - vendor/go.opencensus.io/resource/resource.go | 164 - vendor/go.opencensus.io/stats/doc.go | 69 - .../go.opencensus.io/stats/internal/record.go | 25 - vendor/go.opencensus.io/stats/measure.go | 109 - .../go.opencensus.io/stats/measure_float64.go | 55 - .../go.opencensus.io/stats/measure_int64.go | 55 - vendor/go.opencensus.io/stats/record.go | 69 - vendor/go.opencensus.io/stats/units.go | 25 - .../stats/view/aggregation.go | 120 - .../stats/view/aggregation_data.go | 293 - .../go.opencensus.io/stats/view/collector.go | 86 - vendor/go.opencensus.io/stats/view/doc.go | 47 - vendor/go.opencensus.io/stats/view/export.go | 58 - vendor/go.opencensus.io/stats/view/view.go | 221 - .../stats/view/view_to_metric.go | 131 - vendor/go.opencensus.io/stats/view/worker.go | 279 - 
.../stats/view/worker_commands.go | 182 - vendor/go.opencensus.io/tag/context.go | 43 - vendor/go.opencensus.io/tag/doc.go | 26 - vendor/go.opencensus.io/tag/key.go | 35 - vendor/go.opencensus.io/tag/map.go | 197 - vendor/go.opencensus.io/tag/map_codec.go | 237 - vendor/go.opencensus.io/tag/profile_19.go | 31 - vendor/go.opencensus.io/tag/profile_not19.go | 23 - vendor/go.opencensus.io/tag/validate.go | 56 - vendor/go.opencensus.io/trace/basetypes.go | 119 - vendor/go.opencensus.io/trace/config.go | 86 - vendor/go.opencensus.io/trace/doc.go | 53 - vendor/go.opencensus.io/trace/evictedqueue.go | 38 - vendor/go.opencensus.io/trace/export.go | 97 - .../trace/internal/internal.go | 22 - vendor/go.opencensus.io/trace/lrumap.go | 37 - .../trace/propagation/propagation.go | 108 - vendor/go.opencensus.io/trace/sampling.go | 75 - vendor/go.opencensus.io/trace/spanbucket.go | 130 - vendor/go.opencensus.io/trace/spanstore.go | 306 - vendor/go.opencensus.io/trace/status_codes.go | 37 - vendor/go.opencensus.io/trace/trace.go | 598 - vendor/go.opencensus.io/trace/trace_go11.go | 32 - .../go.opencensus.io/trace/trace_nongo11.go | 25 - .../trace/tracestate/tracestate.go | 147 - vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/argon2/argon2.go | 285 - vendor/golang.org/x/crypto/argon2/blake2b.go | 53 - .../x/crypto/argon2/blamka_amd64.go | 60 - .../golang.org/x/crypto/argon2/blamka_amd64.s | 243 - .../x/crypto/argon2/blamka_generic.go | 163 - .../golang.org/x/crypto/argon2/blamka_ref.go | 15 - vendor/golang.org/x/crypto/blake2b/blake2b.go | 289 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 750 - .../x/crypto/blake2b/blake2b_amd64.go | 24 - .../x/crypto/blake2b/blake2b_amd64.s | 281 - .../x/crypto/blake2b/blake2b_generic.go | 182 - .../x/crypto/blake2b/blake2b_ref.go | 11 - vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 32 - vendor/golang.org/x/crypto/cast5/cast5.go | 533 - .../x/crypto/curve25519/const_amd64.h | 8 - .../x/crypto/curve25519/const_amd64.s | 20 - .../x/crypto/curve25519/cswap_amd64.s | 65 - .../x/crypto/curve25519/curve25519.go | 834 -- vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../x/crypto/curve25519/freeze_amd64.s | 73 - .../x/crypto/curve25519/ladderstep_amd64.s | 1377 -- .../x/crypto/curve25519/mont25519_amd64.go | 240 - .../x/crypto/curve25519/mul_amd64.s | 169 - .../x/crypto/curve25519/square_amd64.s | 132 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 217 - .../ed25519/internal/edwards25519/const.go | 1422 -- .../internal/edwards25519/edwards25519.go | 1793 --- .../x/crypto/internal/chacha20/asm_arm64.s | 308 - .../crypto/internal/chacha20/chacha_arm64.go | 31 - .../internal/chacha20/chacha_generic.go | 264 - .../crypto/internal/chacha20/chacha_noasm.go | 16 - .../crypto/internal/chacha20/chacha_s390x.go | 29 - .../x/crypto/internal/chacha20/chacha_s390x.s | 260 - .../x/crypto/internal/chacha20/xor.go | 43 - .../x/crypto/internal/subtle/aliasing.go | 32 - .../internal/subtle/aliasing_appengine.go | 35 - .../x/crypto/openpgp/armor/armor.go | 219 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/elgamal/elgamal.go | 122 - .../x/crypto/openpgp/errors/errors.go | 72 - vendor/golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/compressed.go | 123 - 
.../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 206 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/packet.go | 551 - .../x/crypto/openpgp/packet/private_key.go | 385 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../x/crypto/openpgp/packet/userid.go | 160 - vendor/golang.org/x/crypto/openpgp/read.go | 442 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 - vendor/golang.org/x/crypto/openpgp/write.go | 418 - vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - .../golang.org/x/crypto/poly1305/mac_noasm.go | 11 - .../golang.org/x/crypto/poly1305/poly1305.go | 83 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 68 - .../golang.org/x/crypto/poly1305/sum_amd64.s | 148 - .../golang.org/x/crypto/poly1305/sum_arm.go | 22 - vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 - .../x/crypto/poly1305/sum_generic.go | 172 - .../golang.org/x/crypto/poly1305/sum_noasm.go | 16 - .../golang.org/x/crypto/poly1305/sum_s390x.go | 42 - .../golang.org/x/crypto/poly1305/sum_s390x.s | 378 - .../x/crypto/poly1305/sum_vmsl_s390x.s | 909 -- vendor/golang.org/x/crypto/scrypt/scrypt.go | 213 - vendor/golang.org/x/crypto/ssh/buffer.go | 97 - vendor/golang.org/x/crypto/ssh/certs.go | 535 - vendor/golang.org/x/crypto/ssh/channel.go | 633 - vendor/golang.org/x/crypto/ssh/cipher.go | 770 - vendor/golang.org/x/crypto/ssh/client.go | 278 - vendor/golang.org/x/crypto/ssh/client_auth.go | 639 - vendor/golang.org/x/crypto/ssh/common.go | 389 - vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 21 - vendor/golang.org/x/crypto/ssh/handshake.go | 647 - vendor/golang.org/x/crypto/ssh/kex.go | 540 - vendor/golang.org/x/crypto/ssh/keys.go | 1100 -- vendor/golang.org/x/crypto/ssh/mac.go | 61 - vendor/golang.org/x/crypto/ssh/messages.go | 836 -- vendor/golang.org/x/crypto/ssh/mux.go | 330 - vendor/golang.org/x/crypto/ssh/server.go | 710 - vendor/golang.org/x/crypto/ssh/session.go | 647 - vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 - vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 - vendor/golang.org/x/crypto/ssh/tcpip.go | 474 - .../x/crypto/ssh/terminal/terminal.go | 966 -- .../golang.org/x/crypto/ssh/terminal/util.go | 114 - .../x/crypto/ssh/terminal/util_aix.go | 12 - .../x/crypto/ssh/terminal/util_bsd.go | 12 - .../x/crypto/ssh/terminal/util_linux.go | 10 - .../x/crypto/ssh/terminal/util_plan9.go | 58 - .../x/crypto/ssh/terminal/util_solaris.go | 124 - .../x/crypto/ssh/terminal/util_windows.go | 105 - vendor/golang.org/x/crypto/ssh/transport.go | 353 - vendor/golang.org/x/net/AUTHORS | 3 - vendor/golang.org/x/net/CONTRIBUTORS | 3 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/context/context.go | 56 - .../x/net/context/ctxhttp/ctxhttp.go | 71 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 - vendor/golang.org/x/net/context/pre_go19.go | 109 - 
vendor/golang.org/x/net/http/httpguts/guts.go | 50 - .../golang.org/x/net/http/httpguts/httplex.go | 346 - vendor/golang.org/x/net/http2/.gitignore | 2 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 - .../x/net/http2/client_conn_pool.go | 282 - vendor/golang.org/x/net/http2/databuffer.go | 146 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/flow.go | 50 - vendor/golang.org/x/net/http2/frame.go | 1614 --- vendor/golang.org/x/net/http2/go111.go | 29 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 88 - vendor/golang.org/x/net/http2/hpack/encode.go | 240 - vendor/golang.org/x/net/http2/hpack/hpack.go | 504 - .../golang.org/x/net/http2/hpack/huffman.go | 222 - vendor/golang.org/x/net/http2/hpack/tables.go | 479 - vendor/golang.org/x/net/http2/http2.go | 384 - vendor/golang.org/x/net/http2/not_go111.go | 20 - vendor/golang.org/x/net/http2/pipe.go | 163 - vendor/golang.org/x/net/http2/server.go | 2907 ---- vendor/golang.org/x/net/http2/transport.go | 2610 ---- vendor/golang.org/x/net/http2/write.go | 365 - vendor/golang.org/x/net/http2/writesched.go | 242 - .../x/net/http2/writesched_priority.go | 452 - .../x/net/http2/writesched_random.go | 72 - vendor/golang.org/x/net/idna/idna10.0.0.go | 734 - vendor/golang.org/x/net/idna/idna9.0.0.go | 682 - vendor/golang.org/x/net/idna/punycode.go | 203 - vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ------ vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ------- vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ------ vendor/golang.org/x/net/idna/trie.go | 72 - vendor/golang.org/x/net/idna/trieval.go | 119 - .../x/net/internal/timeseries/timeseries.go | 525 - vendor/golang.org/x/net/publicsuffix/list.go | 181 - vendor/golang.org/x/net/publicsuffix/table.go | 9829 ------------- vendor/golang.org/x/net/trace/events.go | 532 - vendor/golang.org/x/net/trace/histogram.go | 365 - vendor/golang.org/x/net/trace/trace.go | 1130 -- vendor/golang.org/x/oauth2/.travis.yml | 13 - vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/LICENSE | 27 - vendor/golang.org/x/oauth2/README.md | 35 - vendor/golang.org/x/oauth2/go.mod | 10 - vendor/golang.org/x/oauth2/go.sum | 12 - .../golang.org/x/oauth2/google/appengine.go | 38 - .../x/oauth2/google/appengine_gen1.go | 77 - .../x/oauth2/google/appengine_gen2_flex.go | 27 - vendor/golang.org/x/oauth2/google/default.go | 154 - vendor/golang.org/x/oauth2/google/doc.go | 40 - vendor/golang.org/x/oauth2/google/google.go | 202 - vendor/golang.org/x/oauth2/google/jwt.go | 74 - vendor/golang.org/x/oauth2/google/sdk.go | 201 - .../x/oauth2/internal/client_appengine.go | 13 - vendor/golang.org/x/oauth2/internal/doc.go | 6 - vendor/golang.org/x/oauth2/internal/oauth2.go | 37 - vendor/golang.org/x/oauth2/internal/token.go | 294 - .../golang.org/x/oauth2/internal/transport.go | 33 - vendor/golang.org/x/oauth2/jws/jws.go | 182 - vendor/golang.org/x/oauth2/jwt/jwt.go | 170 - vendor/golang.org/x/oauth2/oauth2.go | 381 - vendor/golang.org/x/oauth2/token.go | 178 - vendor/golang.org/x/oauth2/transport.go | 144 - vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/LICENSE | 27 - vendor/golang.org/x/sync/PATENTS | 22 - 
vendor/golang.org/x/sync/errgroup/errgroup.go | 66 - vendor/golang.org/x/sys/AUTHORS | 3 - vendor/golang.org/x/sys/CONTRIBUTORS | 3 - vendor/golang.org/x/sys/LICENSE | 27 - vendor/golang.org/x/sys/PATENTS | 22 - vendor/golang.org/x/sys/cpu/byteorder.go | 30 - vendor/golang.org/x/sys/cpu/cpu.go | 126 - vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go | 30 - vendor/golang.org/x/sys/cpu/cpu_arm.go | 9 - vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 - vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 - vendor/golang.org/x/sys/cpu/cpu_gccgo.c | 43 - vendor/golang.org/x/sys/cpu/cpu_gccgo.go | 26 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 - vendor/golang.org/x/sys/cpu/cpu_linux.go | 59 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 67 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 33 - .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 161 - vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 11 - vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 11 - vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 - vendor/golang.org/x/sys/cpu/cpu_wasm.go | 15 - vendor/golang.org/x/sys/cpu/cpu_x86.go | 59 - vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 - vendor/golang.org/x/sys/unix/.gitignore | 2 - vendor/golang.org/x/sys/unix/README.md | 173 - .../golang.org/x/sys/unix/affinity_linux.go | 124 - vendor/golang.org/x/sys/unix/aliases.go | 14 - vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 - vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 - .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../x/sys/unix/asm_dragonfly_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 - vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 - .../golang.org/x/sys/unix/bluetooth_linux.go | 35 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 - vendor/golang.org/x/sys/unix/constants.go | 13 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 - vendor/golang.org/x/sys/unix/dev_darwin.go | 24 - vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 - vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 - vendor/golang.org/x/sys/unix/dev_linux.go | 42 - vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 - vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 - vendor/golang.org/x/sys/unix/dirent.go | 17 - vendor/golang.org/x/sys/unix/endian_big.go | 9 - vendor/golang.org/x/sys/unix/endian_little.go | 9 - vendor/golang.org/x/sys/unix/env_unix.go | 31 - .../x/sys/unix/errors_freebsd_386.go | 227 - 
.../x/sys/unix/errors_freebsd_amd64.go | 227 - .../x/sys/unix/errors_freebsd_arm.go | 226 - vendor/golang.org/x/sys/unix/fcntl.go | 32 - vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 - .../x/sys/unix/fcntl_linux_32bit.go | 13 - vendor/golang.org/x/sys/unix/gccgo.go | 62 - vendor/golang.org/x/sys/unix/gccgo_c.c | 39 - .../x/sys/unix/gccgo_linux_amd64.go | 20 - vendor/golang.org/x/sys/unix/ioctl.go | 30 - vendor/golang.org/x/sys/unix/mkall.sh | 212 - vendor/golang.org/x/sys/unix/mkerrors.sh | 659 - .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 265 - .../golang.org/x/sys/unix/openbsd_pledge.go | 166 - .../golang.org/x/sys/unix/openbsd_unveil.go | 44 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 - vendor/golang.org/x/sys/unix/race.go | 30 - vendor/golang.org/x/sys/unix/race0.go | 25 - .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 120 - vendor/golang.org/x/sys/unix/str.go | 26 - vendor/golang.org/x/sys/unix/syscall.go | 54 - vendor/golang.org/x/sys/unix/syscall_aix.go | 557 - .../golang.org/x/sys/unix/syscall_aix_ppc.go | 34 - .../x/sys/unix/syscall_aix_ppc64.go | 34 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 624 - .../golang.org/x/sys/unix/syscall_darwin.go | 706 - .../x/sys/unix/syscall_darwin_386.go | 63 - .../x/sys/unix/syscall_darwin_amd64.go | 63 - .../x/sys/unix/syscall_darwin_arm.go | 64 - .../x/sys/unix/syscall_darwin_arm64.go | 66 - .../x/sys/unix/syscall_darwin_libSystem.go | 31 - .../x/sys/unix/syscall_dragonfly.go | 539 - .../x/sys/unix/syscall_dragonfly_amd64.go | 52 - .../golang.org/x/sys/unix/syscall_freebsd.go | 824 -- .../x/sys/unix/syscall_freebsd_386.go | 52 - .../x/sys/unix/syscall_freebsd_amd64.go | 52 - .../x/sys/unix/syscall_freebsd_arm.go | 52 - .../x/sys/unix/syscall_freebsd_arm64.go | 52 - vendor/golang.org/x/sys/unix/syscall_linux.go | 1784 --- .../x/sys/unix/syscall_linux_386.go | 386 - .../x/sys/unix/syscall_linux_amd64.go | 190 - .../x/sys/unix/syscall_linux_amd64_gc.go | 13 - .../x/sys/unix/syscall_linux_arm.go | 274 - .../x/sys/unix/syscall_linux_arm64.go | 223 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 - .../x/sys/unix/syscall_linux_gc_386.go | 16 - .../x/sys/unix/syscall_linux_gccgo_386.go | 30 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 - .../x/sys/unix/syscall_linux_mips64x.go | 222 - .../x/sys/unix/syscall_linux_mipsx.go | 234 - .../x/sys/unix/syscall_linux_ppc64x.go | 152 - .../x/sys/unix/syscall_linux_riscv64.go | 226 - .../x/sys/unix/syscall_linux_s390x.go | 338 - .../x/sys/unix/syscall_linux_sparc64.go | 147 - .../golang.org/x/sys/unix/syscall_netbsd.go | 622 - .../x/sys/unix/syscall_netbsd_386.go | 33 - .../x/sys/unix/syscall_netbsd_amd64.go | 33 - .../x/sys/unix/syscall_netbsd_arm.go | 33 - .../x/sys/unix/syscall_netbsd_arm64.go | 33 - .../golang.org/x/sys/unix/syscall_openbsd.go | 416 - .../x/sys/unix/syscall_openbsd_386.go | 37 - .../x/sys/unix/syscall_openbsd_amd64.go | 37 - .../x/sys/unix/syscall_openbsd_arm.go | 37 - .../golang.org/x/sys/unix/syscall_solaris.go | 737 - .../x/sys/unix/syscall_solaris_amd64.go | 23 - vendor/golang.org/x/sys/unix/syscall_unix.go | 431 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 - vendor/golang.org/x/sys/unix/timestruct.go | 82 - vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1374 -- .../x/sys/unix/zerrors_aix_ppc64.go | 1375 -- .../x/sys/unix/zerrors_darwin_386.go | 1783 --- .../x/sys/unix/zerrors_darwin_amd64.go | 1783 --- 
.../x/sys/unix/zerrors_darwin_arm.go | 1783 --- .../x/sys/unix/zerrors_darwin_arm64.go | 1783 --- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1650 --- .../x/sys/unix/zerrors_freebsd_386.go | 1793 --- .../x/sys/unix/zerrors_freebsd_amd64.go | 1794 --- .../x/sys/unix/zerrors_freebsd_arm.go | 1802 --- .../x/sys/unix/zerrors_freebsd_arm64.go | 1794 --- .../x/sys/unix/zerrors_linux_386.go | 2835 ---- .../x/sys/unix/zerrors_linux_amd64.go | 2835 ---- .../x/sys/unix/zerrors_linux_arm.go | 2841 ---- .../x/sys/unix/zerrors_linux_arm64.go | 2826 ---- .../x/sys/unix/zerrors_linux_mips.go | 2842 ---- .../x/sys/unix/zerrors_linux_mips64.go | 2842 ---- .../x/sys/unix/zerrors_linux_mips64le.go | 2842 ---- .../x/sys/unix/zerrors_linux_mipsle.go | 2842 ---- .../x/sys/unix/zerrors_linux_ppc64.go | 2897 ---- .../x/sys/unix/zerrors_linux_ppc64le.go | 2897 ---- .../x/sys/unix/zerrors_linux_riscv64.go | 2822 ---- .../x/sys/unix/zerrors_linux_s390x.go | 2895 ---- .../x/sys/unix/zerrors_linux_sparc64.go | 2891 ---- .../x/sys/unix/zerrors_netbsd_386.go | 1772 --- .../x/sys/unix/zerrors_netbsd_amd64.go | 1762 --- .../x/sys/unix/zerrors_netbsd_arm.go | 1751 --- .../x/sys/unix/zerrors_netbsd_arm64.go | 1762 --- .../x/sys/unix/zerrors_openbsd_386.go | 1654 --- .../x/sys/unix/zerrors_openbsd_amd64.go | 1765 --- .../x/sys/unix/zerrors_openbsd_arm.go | 1656 --- .../x/sys/unix/zerrors_solaris_amd64.go | 1532 -- .../golang.org/x/sys/unix/zptrace386_linux.go | 80 - .../golang.org/x/sys/unix/zptracearm_linux.go | 41 - .../x/sys/unix/zptracemips_linux.go | 50 - .../x/sys/unix/zptracemipsle_linux.go | 50 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 -- .../x/sys/unix/zsyscall_aix_ppc64.go | 1442 -- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 -- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 -- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1810 --- .../x/sys/unix/zsyscall_darwin_386.go | 2505 ---- .../x/sys/unix/zsyscall_darwin_386.s | 284 - .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 --- .../x/sys/unix/zsyscall_darwin_amd64.go | 2520 ---- .../x/sys/unix/zsyscall_darwin_amd64.s | 286 - .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1793 --- .../x/sys/unix/zsyscall_darwin_arm.go | 2483 ---- .../x/sys/unix/zsyscall_darwin_arm.s | 282 - .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1793 --- .../x/sys/unix/zsyscall_darwin_arm64.go | 2483 ---- .../x/sys/unix/zsyscall_darwin_arm64.s | 282 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1659 --- .../x/sys/unix/zsyscall_freebsd_386.go | 2015 --- .../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 --- .../x/sys/unix/zsyscall_freebsd_arm.go | 2015 --- .../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 --- .../x/sys/unix/zsyscall_linux_386.go | 2246 --- .../x/sys/unix/zsyscall_linux_amd64.go | 2413 ---- .../x/sys/unix/zsyscall_linux_arm.go | 2368 ---- .../x/sys/unix/zsyscall_linux_arm64.go | 2270 --- .../x/sys/unix/zsyscall_linux_mips.go | 2426 ---- .../x/sys/unix/zsyscall_linux_mips64.go | 2397 ---- .../x/sys/unix/zsyscall_linux_mips64le.go | 2397 ---- .../x/sys/unix/zsyscall_linux_mipsle.go | 2426 ---- .../x/sys/unix/zsyscall_linux_ppc64.go | 2475 ---- .../x/sys/unix/zsyscall_linux_ppc64le.go | 2475 ---- .../x/sys/unix/zsyscall_linux_riscv64.go | 2250 --- .../x/sys/unix/zsyscall_linux_s390x.go | 2245 --- .../x/sys/unix/zsyscall_linux_sparc64.go | 2408 ---- .../x/sys/unix/zsyscall_netbsd_386.go | 1826 --- .../x/sys/unix/zsyscall_netbsd_amd64.go | 1826 --- .../x/sys/unix/zsyscall_netbsd_arm.go | 1826 --- .../x/sys/unix/zsyscall_netbsd_arm64.go | 1826 --- 
.../x/sys/unix/zsyscall_openbsd_386.go | 1692 --- .../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 --- .../x/sys/unix/zsyscall_openbsd_arm.go | 1692 --- .../x/sys/unix/zsyscall_solaris_amd64.go | 1953 --- .../x/sys/unix/zsysctl_openbsd_386.go | 270 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 - .../x/sys/unix/zsysctl_openbsd_arm.go | 270 - .../x/sys/unix/zsysnum_darwin_386.go | 436 - .../x/sys/unix/zsysnum_darwin_amd64.go | 438 - .../x/sys/unix/zsysnum_darwin_arm.go | 436 - .../x/sys/unix/zsysnum_darwin_arm64.go | 436 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 - .../x/sys/unix/zsysnum_freebsd_386.go | 403 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 403 - .../x/sys/unix/zsysnum_freebsd_arm.go | 403 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 395 - .../x/sys/unix/zsysnum_linux_386.go | 392 - .../x/sys/unix/zsysnum_linux_amd64.go | 344 - .../x/sys/unix/zsysnum_linux_arm.go | 364 - .../x/sys/unix/zsysnum_linux_arm64.go | 289 - .../x/sys/unix/zsysnum_linux_mips.go | 377 - .../x/sys/unix/zsysnum_linux_mips64.go | 337 - .../x/sys/unix/zsysnum_linux_mips64le.go | 337 - .../x/sys/unix/zsysnum_linux_mipsle.go | 377 - .../x/sys/unix/zsysnum_linux_ppc64.go | 375 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 375 - .../x/sys/unix/zsysnum_linux_riscv64.go | 288 - .../x/sys/unix/zsysnum_linux_s390x.go | 337 - .../x/sys/unix/zsysnum_linux_sparc64.go | 351 - .../x/sys/unix/zsysnum_netbsd_386.go | 274 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 - .../x/sys/unix/zsysnum_netbsd_arm.go | 274 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 274 - .../x/sys/unix/zsysnum_openbsd_386.go | 218 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 218 - .../x/sys/unix/zsysnum_openbsd_arm.go | 218 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 345 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 350 - .../x/sys/unix/ztypes_darwin_386.go | 499 - .../x/sys/unix/ztypes_darwin_amd64.go | 509 - .../x/sys/unix/ztypes_darwin_arm.go | 500 - .../x/sys/unix/ztypes_darwin_arm64.go | 509 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 469 - .../x/sys/unix/ztypes_freebsd_386.go | 603 - .../x/sys/unix/ztypes_freebsd_amd64.go | 602 - .../x/sys/unix/ztypes_freebsd_arm.go | 602 - .../x/sys/unix/ztypes_freebsd_arm64.go | 602 - .../golang.org/x/sys/unix/ztypes_linux_386.go | 2135 --- .../x/sys/unix/ztypes_linux_amd64.go | 2148 --- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2126 --- .../x/sys/unix/ztypes_linux_arm64.go | 2127 --- .../x/sys/unix/ztypes_linux_mips.go | 2132 --- .../x/sys/unix/ztypes_linux_mips64.go | 2129 --- .../x/sys/unix/ztypes_linux_mips64le.go | 2129 --- .../x/sys/unix/ztypes_linux_mipsle.go | 2132 --- .../x/sys/unix/ztypes_linux_ppc64.go | 2137 --- .../x/sys/unix/ztypes_linux_ppc64le.go | 2137 --- .../x/sys/unix/ztypes_linux_riscv64.go | 2154 --- .../x/sys/unix/ztypes_linux_s390x.go | 2151 --- .../x/sys/unix/ztypes_linux_sparc64.go | 2132 --- .../x/sys/unix/ztypes_netbsd_386.go | 465 - .../x/sys/unix/ztypes_netbsd_amd64.go | 472 - .../x/sys/unix/ztypes_netbsd_arm.go | 470 - .../x/sys/unix/ztypes_netbsd_arm64.go | 472 - .../x/sys/unix/ztypes_openbsd_386.go | 570 - .../x/sys/unix/ztypes_openbsd_amd64.go | 570 - .../x/sys/unix/ztypes_openbsd_arm.go | 571 - .../x/sys/unix/ztypes_solaris_amd64.go | 442 - vendor/golang.org/x/sys/windows/aliases.go | 13 - .../x/sys/windows/asm_windows_386.s | 13 - .../x/sys/windows/asm_windows_amd64.s | 13 - .../x/sys/windows/asm_windows_arm.s | 11 - .../golang.org/x/sys/windows/dll_windows.go | 378 - .../golang.org/x/sys/windows/env_windows.go | 29 - 
vendor/golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 97 - .../x/sys/windows/memory_windows.go | 26 - vendor/golang.org/x/sys/windows/mkerrors.bash | 64 - vendor/golang.org/x/sys/windows/mkerrors.go | 7 - vendor/golang.org/x/sys/windows/mksyscall.go | 7 - vendor/golang.org/x/sys/windows/race.go | 30 - vendor/golang.org/x/sys/windows/race0.go | 25 - .../x/sys/windows/security_windows.go | 649 - vendor/golang.org/x/sys/windows/service.go | 181 - vendor/golang.org/x/sys/windows/str.go | 22 - vendor/golang.org/x/sys/windows/syscall.go | 74 - .../x/sys/windows/syscall_windows.go | 1220 -- .../golang.org/x/sys/windows/types_windows.go | 1441 -- .../x/sys/windows/types_windows_386.go | 22 - .../x/sys/windows/types_windows_amd64.go | 22 - .../x/sys/windows/types_windows_arm.go | 22 - .../x/sys/windows/zerrors_windows.go | 6853 --------- .../x/sys/windows/zsyscall_windows.go | 2779 ---- vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/LICENSE | 27 - vendor/golang.org/x/text/PATENTS | 22 - vendor/golang.org/x/text/encoding/encoding.go | 335 - .../internal/identifier/identifier.go | 81 - .../text/encoding/internal/identifier/mib.go | 1621 --- .../x/text/encoding/internal/internal.go | 75 - .../x/text/encoding/unicode/override.go | 82 - .../x/text/encoding/unicode/unicode.go | 434 - .../internal/utf8internal/utf8internal.go | 87 - vendor/golang.org/x/text/runes/cond.go | 187 - vendor/golang.org/x/text/runes/runes.go | 355 - .../x/text/secure/bidirule/bidirule.go | 336 - .../x/text/secure/bidirule/bidirule10.0.0.go | 11 - .../x/text/secure/bidirule/bidirule9.0.0.go | 14 - .../golang.org/x/text/transform/transform.go | 705 - vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 - .../golang.org/x/text/unicode/bidi/bracket.go | 335 - vendor/golang.org/x/text/unicode/bidi/core.go | 1058 -- vendor/golang.org/x/text/unicode/bidi/prop.go | 206 - .../x/text/unicode/bidi/tables10.0.0.go | 1815 --- .../x/text/unicode/bidi/tables9.0.0.go | 1781 --- .../golang.org/x/text/unicode/bidi/trieval.go | 60 - .../x/text/unicode/norm/composition.go | 508 - .../x/text/unicode/norm/forminfo.go | 273 - .../golang.org/x/text/unicode/norm/input.go | 109 - vendor/golang.org/x/text/unicode/norm/iter.go | 457 - .../x/text/unicode/norm/normalize.go | 609 - .../x/text/unicode/norm/readwriter.go | 125 - .../x/text/unicode/norm/tables10.0.0.go | 7657 ---------- .../x/text/unicode/norm/tables9.0.0.go | 7637 ---------- .../x/text/unicode/norm/transform.go | 88 - vendor/golang.org/x/text/unicode/norm/trie.go | 54 - vendor/google.golang.org/api/AUTHORS | 10 - vendor/google.golang.org/api/CONTRIBUTORS | 55 - vendor/google.golang.org/api/LICENSE | 27 - .../api/gensupport/backoff.go | 51 - .../api/gensupport/buffer.go | 79 - .../google.golang.org/api/gensupport/doc.go | 10 - .../api/gensupport/header.go | 22 - .../google.golang.org/api/gensupport/json.go | 211 - .../api/gensupport/jsonfloat.go | 57 - .../google.golang.org/api/gensupport/media.go | 363 - .../api/gensupport/params.go | 51 - .../api/gensupport/resumable.go | 216 - .../google.golang.org/api/gensupport/retry.go | 84 - .../google.golang.org/api/gensupport/send.go | 87 - .../api/googleapi/googleapi.go | 403 - .../googleapi/internal/uritemplates/LICENSE | 18 - .../internal/uritemplates/uritemplates.go | 248 - .../googleapi/internal/uritemplates/utils.go | 17 - .../api/googleapi/transport/apikey.go | 38 - .../google.golang.org/api/googleapi/types.go | 202 - 
.../google.golang.org/api/internal/creds.go | 102 - vendor/google.golang.org/api/internal/pool.go | 61 - .../api/internal/service-account.json | 12 - .../api/internal/settings.go | 96 - .../api/option/credentials_go19.go | 33 - .../api/option/credentials_notgo19.go | 32 - vendor/google.golang.org/api/option/option.go | 235 - .../api/storage/v1/storage-api.json | 3848 ----- .../api/storage/v1/storage-gen.go | 11588 ---------------- .../api/transport/http/dial.go | 161 - .../api/transport/http/dial_appengine.go | 30 - .../http/internal/propagation/http.go | 96 - .../google.golang.org/appengine/.travis.yml | 20 - .../appengine/CONTRIBUTING.md | 90 - vendor/google.golang.org/appengine/LICENSE | 202 - vendor/google.golang.org/appengine/README.md | 73 - .../google.golang.org/appengine/appengine.go | 135 - .../appengine/appengine_vm.go | 20 - vendor/google.golang.org/appengine/errors.go | 46 - vendor/google.golang.org/appengine/go.mod | 7 - vendor/google.golang.org/appengine/go.sum | 6 - .../google.golang.org/appengine/identity.go | 142 - .../appengine/internal/api.go | 675 - .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../app_identity/app_identity_service.pb.go | 611 - .../app_identity/app_identity_service.proto | 64 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 ------ .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 -- .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../internal/modules/modules_service.pb.go | 786 -- .../internal/modules/modules_service.proto | 80 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 361 - .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../google.golang.org/appengine/namespace.go | 25 - vendor/google.golang.org/appengine/timeout.go | 20 - .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 210 - vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/rpc/status/status.pb.go | 159 - vendor/google.golang.org/grpc/.travis.yml | 39 - vendor/google.golang.org/grpc/AUTHORS | 1 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 37 - vendor/google.golang.org/grpc/LICENSE | 202 - vendor/google.golang.org/grpc/Makefile | 60 - vendor/google.golang.org/grpc/README.md | 67 - vendor/google.golang.org/grpc/backoff.go | 38 - vendor/google.golang.org/grpc/balancer.go | 391 - .../grpc/balancer/balancer.go | 336 - .../grpc/balancer/base/balancer.go | 178 - .../grpc/balancer/base/base.go | 64 - .../grpc/balancer/roundrobin/roundrobin.go | 83 - .../grpc/balancer_conn_wrappers.go | 315 - .../grpc/balancer_v1_wrapper.go | 341 - .../grpc_binarylog_v1/binarylog.pb.go | 900 -- vendor/google.golang.org/grpc/call.go | 74 - vendor/google.golang.org/grpc/clientconn.go | 1356 -- 
vendor/google.golang.org/grpc/codec.go | 50 -
vendor/google.golang.org/grpc/codegen.sh | 17 -
.../grpc/codes/code_string.go | 62 -
vendor/google.golang.org/grpc/codes/codes.go | 197 -
.../grpc/connectivity/connectivity.go | 73 -
.../grpc/credentials/credentials.go | 338 -
.../grpc/credentials/internal/syscallconn.go | 61 -
.../internal/syscallconn_appengine.go | 30 -
.../grpc/credentials/tls13.go | 30 -
vendor/google.golang.org/grpc/dialoptions.go | 532 -
vendor/google.golang.org/grpc/doc.go | 24 -
.../grpc/encoding/encoding.go | 118 -
.../grpc/encoding/proto/proto.go | 110 -
vendor/google.golang.org/grpc/go.mod | 19 -
vendor/google.golang.org/grpc/go.sum | 33 -
.../google.golang.org/grpc/grpclog/grpclog.go | 126 -
.../google.golang.org/grpc/grpclog/logger.go | 85 -
.../grpc/grpclog/loggerv2.go | 195 -
vendor/google.golang.org/grpc/install_gae.sh | 6 -
vendor/google.golang.org/grpc/interceptor.go | 77 -
.../grpc/internal/backoff/backoff.go | 78 -
.../grpc/internal/balancerload/load.go | 46 -
.../grpc/internal/binarylog/binarylog.go | 167 -
.../internal/binarylog/binarylog_testutil.go | 42 -
.../grpc/internal/binarylog/env_config.go | 210 -
.../grpc/internal/binarylog/method_logger.go | 423 -
.../grpc/internal/binarylog/regenerate.sh | 33 -
.../grpc/internal/binarylog/sink.go | 162 -
.../grpc/internal/binarylog/util.go | 41 -
.../grpc/internal/channelz/funcs.go | 699 -
.../grpc/internal/channelz/types.go | 702 -
.../grpc/internal/channelz/types_linux.go | 53 -
.../grpc/internal/channelz/types_nonlinux.go | 44 -
.../grpc/internal/channelz/util_linux.go | 39 -
.../grpc/internal/channelz/util_nonlinux.go | 26 -
.../grpc/internal/envconfig/envconfig.go | 64 -
.../grpc/internal/grpcrand/grpcrand.go | 56 -
.../grpc/internal/grpcsync/event.go | 61 -
.../grpc/internal/internal.go | 54 -
.../grpc/internal/syscall/syscall_linux.go | 114 -
.../grpc/internal/syscall/syscall_nonlinux.go | 73 -
.../grpc/internal/transport/bdp_estimator.go | 141 -
.../grpc/internal/transport/controlbuf.go | 852 --
.../grpc/internal/transport/defaults.go | 49 -
.../grpc/internal/transport/flowcontrol.go | 218 -
.../grpc/internal/transport/handler_server.go | 430 -
.../grpc/internal/transport/http2_client.go | 1397 --
.../grpc/internal/transport/http2_server.go | 1214 --
.../grpc/internal/transport/http_util.go | 676 -
.../grpc/internal/transport/log.go | 44 -
.../grpc/internal/transport/transport.go | 760 -
.../grpc/keepalive/keepalive.go | 85 -
.../grpc/metadata/metadata.go | 209 -
.../grpc/naming/dns_resolver.go | 293 -
.../google.golang.org/grpc/naming/naming.go | 69 -
vendor/google.golang.org/grpc/peer/peer.go | 51 -
.../google.golang.org/grpc/picker_wrapper.go | 189 -
vendor/google.golang.org/grpc/pickfirst.go | 110 -
vendor/google.golang.org/grpc/proxy.go | 152 -
.../grpc/resolver/dns/dns_resolver.go | 438 -
.../grpc/resolver/passthrough/passthrough.go | 57 -
.../grpc/resolver/resolver.go | 173 -
.../grpc/resolver_conn_wrapper.go | 165 -
vendor/google.golang.org/grpc/rpc_util.go | 843 --
vendor/google.golang.org/grpc/server.go | 1498 --
.../google.golang.org/grpc/service_config.go | 373 -
.../google.golang.org/grpc/stats/handlers.go | 63 -
vendor/google.golang.org/grpc/stats/stats.go | 300 -
.../google.golang.org/grpc/status/status.go | 210 -
vendor/google.golang.org/grpc/stream.go | 1498 --
vendor/google.golang.org/grpc/tap/tap.go | 51 -
vendor/google.golang.org/grpc/trace.go | 126 -
vendor/google.golang.org/grpc/version.go | 22 -
vendor/google.golang.org/grpc/vet.sh | 129 -
vendor/gopkg.in/ini.v1/.gitignore | 6 -
vendor/gopkg.in/ini.v1/.travis.yml | 17 -
vendor/gopkg.in/ini.v1/LICENSE | 191 -
vendor/gopkg.in/ini.v1/Makefile | 15 -
vendor/gopkg.in/ini.v1/README.md | 46 -
vendor/gopkg.in/ini.v1/error.go | 32 -
vendor/gopkg.in/ini.v1/file.go | 418 -
vendor/gopkg.in/ini.v1/ini.go | 219 -
vendor/gopkg.in/ini.v1/key.go | 752 -
vendor/gopkg.in/ini.v1/parser.go | 488 -
vendor/gopkg.in/ini.v1/section.go | 259 -
vendor/gopkg.in/ini.v1/struct.go | 512 -
vendor/gopkg.in/tomb.v2/LICENSE | 29 -
vendor/gopkg.in/tomb.v2/README.md | 4 -
vendor/gopkg.in/tomb.v2/context.go | 74 -
vendor/gopkg.in/tomb.v2/context16.go | 74 -
vendor/gopkg.in/tomb.v2/tomb.go | 237 -
vendor/gopkg.in/yaml.v2/.travis.yml | 12 -
vendor/gopkg.in/yaml.v2/LICENSE | 201 -
vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 -
vendor/gopkg.in/yaml.v2/NOTICE | 13 -
vendor/gopkg.in/yaml.v2/README.md | 133 -
vendor/gopkg.in/yaml.v2/apic.go | 739 -
vendor/gopkg.in/yaml.v2/decode.go | 775 --
vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ---
vendor/gopkg.in/yaml.v2/encode.go | 390 -
vendor/gopkg.in/yaml.v2/go.mod | 5 -
vendor/gopkg.in/yaml.v2/parserc.go | 1095 --
vendor/gopkg.in/yaml.v2/readerc.go | 412 -
vendor/gopkg.in/yaml.v2/resolve.go | 258 -
vendor/gopkg.in/yaml.v2/scannerc.go | 2696 ----
vendor/gopkg.in/yaml.v2/sorter.go | 113 -
vendor/gopkg.in/yaml.v2/writerc.go | 26 -
vendor/gopkg.in/yaml.v2/yaml.go | 466 -
vendor/gopkg.in/yaml.v2/yamlh.go | 738 -
vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 -
vendor/modules.txt | 223 -
1346 files changed, 503289 deletions(-)
delete mode 100644 vendor/bazil.org/fuse/.gitattributes
delete mode 100644 vendor/bazil.org/fuse/.gitignore
delete mode 100644 vendor/bazil.org/fuse/LICENSE
delete mode 100644 vendor/bazil.org/fuse/README.md
delete mode 100644 vendor/bazil.org/fuse/buffer.go
delete mode 100644 vendor/bazil.org/fuse/debug.go
delete mode 100644 vendor/bazil.org/fuse/error_darwin.go
delete mode 100644 vendor/bazil.org/fuse/error_freebsd.go
delete mode 100644 vendor/bazil.org/fuse/error_linux.go
delete mode 100644 vendor/bazil.org/fuse/error_std.go
delete mode 100644 vendor/bazil.org/fuse/fs/serve.go
delete mode 100644 vendor/bazil.org/fuse/fs/tree.go
delete mode 100644 vendor/bazil.org/fuse/fuse.go
delete mode 100644 vendor/bazil.org/fuse/fuse_darwin.go
delete mode 100644 vendor/bazil.org/fuse/fuse_freebsd.go
delete mode 100644 vendor/bazil.org/fuse/fuse_kernel.go
delete mode 100644 vendor/bazil.org/fuse/fuse_kernel_darwin.go
delete mode 100644 vendor/bazil.org/fuse/fuse_kernel_freebsd.go
delete mode 100644 vendor/bazil.org/fuse/fuse_kernel_linux.go
delete mode 100644 vendor/bazil.org/fuse/fuse_kernel_std.go
delete mode 100644 vendor/bazil.org/fuse/fuse_linux.go
delete mode 100644 vendor/bazil.org/fuse/fuseutil/fuseutil.go
delete mode 100644 vendor/bazil.org/fuse/mount.go
delete mode 100644 vendor/bazil.org/fuse/mount_darwin.go
delete mode 100644 vendor/bazil.org/fuse/mount_freebsd.go
delete mode 100644 vendor/bazil.org/fuse/mount_linux.go
delete mode 100644 vendor/bazil.org/fuse/options.go
delete mode 100644 vendor/bazil.org/fuse/options_darwin.go
delete mode 100644 vendor/bazil.org/fuse/options_freebsd.go
delete mode 100644 vendor/bazil.org/fuse/options_linux.go
delete mode 100644 vendor/bazil.org/fuse/protocol.go
delete mode 100644 vendor/bazil.org/fuse/unmount.go
delete mode 100644 vendor/bazil.org/fuse/unmount_linux.go
delete mode 100644 vendor/bazil.org/fuse/unmount_std.go
delete mode 100644 vendor/cloud.google.com/go/AUTHORS
delete mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS
delete mode 100644 vendor/cloud.google.com/go/LICENSE
delete mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/LICENSE
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/version/version.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.mod
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.mod
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.sum
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go
delete mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE
delete mode 100644 vendor/github.com/Azure/go-autorest/logger/go.mod
delete mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go
delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE
delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.mod
delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go
delete mode 100644 vendor/github.com/cenkalti/backoff/.gitignore
delete mode 100644 vendor/github.com/cenkalti/backoff/.travis.yml
delete mode 100644 vendor/github.com/cenkalti/backoff/LICENSE
delete mode 100644 vendor/github.com/cenkalti/backoff/README.md
delete mode 100644 vendor/github.com/cenkalti/backoff/backoff.go
delete mode 100644 vendor/github.com/cenkalti/backoff/context.go
delete mode 100644 vendor/github.com/cenkalti/backoff/exponential.go
delete mode 100644 vendor/github.com/cenkalti/backoff/retry.go
delete mode 100644 vendor/github.com/cenkalti/backoff/ticker.go
delete mode 100644 vendor/github.com/cenkalti/backoff/tries.go
delete mode 100644 vendor/github.com/cespare/xxhash/LICENSE.txt
delete mode 100644 vendor/github.com/cespare/xxhash/README.md
delete mode 100644 vendor/github.com/cespare/xxhash/go.mod
delete mode 100644 vendor/github.com/cespare/xxhash/go.sum
delete mode 100644 vendor/github.com/cespare/xxhash/rotate.go
delete mode 100644 vendor/github.com/cespare/xxhash/rotate19.go
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash.go
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.go
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.s
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_other.go
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_safe.go
delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_unsafe.go
delete mode 100644 vendor/github.com/cpuguy83/go-md2man/LICENSE.md
delete mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
delete mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.gitignore
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/LICENSE
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/doc.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/none.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go
delete mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go
delete mode 100644 vendor/github.com/elithrar/simple-scrypt/.gitignore
delete mode 100644 vendor/github.com/elithrar/simple-scrypt/.travis.yml
delete mode 100644 vendor/github.com/elithrar/simple-scrypt/LICENSE
delete mode 100644 vendor/github.com/elithrar/simple-scrypt/README.md
delete mode 100644 vendor/github.com/elithrar/simple-scrypt/scrypt.go
delete mode 100644 vendor/github.com/golang/protobuf/AUTHORS
delete mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
delete mode 100644 vendor/github.com/golang/protobuf/LICENSE
delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
delete mode 100644 vendor/github.com/google/go-cmp/LICENSE
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/format.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/options.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/path.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/reporter.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE
delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
delete mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
delete mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
delete mode 100644 vendor/github.com/juju/ratelimit/LICENSE
delete mode 100644 vendor/github.com/juju/ratelimit/README.md
delete mode 100644 vendor/github.com/juju/ratelimit/ratelimit.go
delete mode 100644 vendor/github.com/juju/ratelimit/reader.go
delete mode 100644 vendor/github.com/kr/fs/LICENSE
delete mode 100644 vendor/github.com/kr/fs/Readme
delete mode 100644 vendor/github.com/kr/fs/filesystem.go
delete mode 100644 vendor/github.com/kr/fs/go.mod
delete mode 100644 vendor/github.com/kr/fs/walk.go
delete mode 100644 vendor/github.com/kurin/blazer/AUTHORS
delete mode 100644 vendor/github.com/kurin/blazer/LICENSE
delete mode 100644 vendor/github.com/kurin/blazer/b2/b2.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/backend.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/baseline.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/buffer.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/iterator.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/key.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/monitor.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/reader.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/readerat.go
delete mode 100644 vendor/github.com/kurin/blazer/b2/writer.go
delete mode 100644 vendor/github.com/kurin/blazer/base/base.go
delete mode 100644 vendor/github.com/kurin/blazer/base/strings.go
delete mode 100644 vendor/github.com/kurin/blazer/internal/b2assets/b2assets.go
delete mode 100644 vendor/github.com/kurin/blazer/internal/b2assets/gen.go
delete mode 100644 vendor/github.com/kurin/blazer/internal/b2types/b2types.go
delete mode 100644 vendor/github.com/kurin/blazer/internal/blog/blog.go
delete mode 100644 vendor/github.com/kurin/blazer/x/window/window.go
delete mode 100644 vendor/github.com/marstr/guid/.travis.yml
delete mode 100644 vendor/github.com/marstr/guid/LICENSE.txt
delete mode 100644 vendor/github.com/marstr/guid/README.md
delete mode 100644 vendor/github.com/marstr/guid/guid.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/.gitignore
delete mode 100644 vendor/github.com/minio/minio-go/v6/.travis.yml
delete mode 100644 vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md
delete mode 100644 vendor/github.com/minio/minio-go/v6/LICENSE
delete mode 100644 vendor/github.com/minio/minio-go/v6/MAINTAINERS.md
delete mode 100644 vendor/github.com/minio/minio-go/v6/Makefile
delete mode 100644 vendor/github.com/minio/minio-go/v6/NOTICE
delete mode 100644 vendor/github.com/minio/minio-go/v6/README.md
delete mode 100644 vendor/github.com/minio/minio-go/v6/README_zh_CN.md
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-compose-object.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-datatypes.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-error-response.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-object-acl.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-object-context.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-object-file.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-object.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-options.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-get-policy.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-list.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-notification.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-object-lock.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-object-retention.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-presigned.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-bucket.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-common.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-context.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-copy.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-file.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-put-object.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-remove.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-select.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api-stat.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/api.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/appveyor.yml
delete mode 100644 vendor/github.com/minio/minio-go/v6/bucket-cache.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/bucket-notification.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/constants.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/core.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/go.mod
delete mode 100644 vendor/github.com/minio/minio-go/v6/go.sum
delete mode 100644 vendor/github.com/minio/minio-go/v6/hook-reader.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_ldap_identity.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/post-policy.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/retry-continous.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/retry.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/s3-endpoints.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/s3-error.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/staticcheck.conf
delete mode 100644 vendor/github.com/minio/minio-go/v6/transport.go
delete mode 100644 vendor/github.com/minio/minio-go/v6/utils.go
delete mode 100644 vendor/github.com/minio/sha256-simd/.gitignore
delete mode 100644 vendor/github.com/minio/sha256-simd/.travis.yml
delete mode 100644 vendor/github.com/minio/sha256-simd/LICENSE
delete mode 100644 vendor/github.com/minio/sha256-simd/README.md
delete mode 100644 vendor/github.com/minio/sha256-simd/appveyor.yml
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid.go
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_386.go
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_386.s
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_arm.go
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_other.go
delete mode 100644 vendor/github.com/minio/sha256-simd/go.mod
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_amd64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.s
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_other.go
delete mode 100644 vendor/github.com/minio/sha256-simd/test-architectures.sh
delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
delete mode 100644 vendor/github.com/mitchellh/go-homedir/go.mod
delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
delete mode 100644 vendor/github.com/ncw/swift/.gitignore
delete mode 100644 vendor/github.com/ncw/swift/.travis.yml
delete mode 100644 vendor/github.com/ncw/swift/COPYING
delete mode 100644 vendor/github.com/ncw/swift/README.md
delete mode 100644 vendor/github.com/ncw/swift/auth.go
delete mode 100644 vendor/github.com/ncw/swift/auth_v3.go
delete mode 100644 vendor/github.com/ncw/swift/compatibility_1_0.go
delete mode 100644 vendor/github.com/ncw/swift/compatibility_1_1.go
delete mode 100644 vendor/github.com/ncw/swift/compatibility_1_6.go
delete mode 100644 vendor/github.com/ncw/swift/compatibility_not_1_6.go
delete mode 100644 vendor/github.com/ncw/swift/dlo.go
delete mode 100644 vendor/github.com/ncw/swift/doc.go
delete mode 100644 vendor/github.com/ncw/swift/go.mod
delete mode 100644 vendor/github.com/ncw/swift/largeobjects.go
delete mode 100644 vendor/github.com/ncw/swift/meta.go
delete mode 100644 vendor/github.com/ncw/swift/notes.txt
delete mode 100644 vendor/github.com/ncw/swift/slo.go
delete mode 100644 vendor/github.com/ncw/swift/swift.go
delete mode 100644 vendor/github.com/ncw/swift/timeout_reader.go
delete mode 100644 vendor/github.com/ncw/swift/travis_realserver.sh
delete mode 100644 vendor/github.com/ncw/swift/watchdog_reader.go
delete mode 100644 vendor/github.com/pkg/errors/.gitignore
delete mode 100644 vendor/github.com/pkg/errors/.travis.yml
delete mode 100644 vendor/github.com/pkg/errors/LICENSE
delete mode 100644 vendor/github.com/pkg/errors/README.md
delete mode 100644 vendor/github.com/pkg/errors/appveyor.yml
delete mode 100644 vendor/github.com/pkg/errors/errors.go
delete mode 100644 vendor/github.com/pkg/errors/stack.go
delete mode 100644 vendor/github.com/pkg/profile/.travis.yml
delete mode 100644 vendor/github.com/pkg/profile/AUTHORS
delete mode 100644 vendor/github.com/pkg/profile/LICENSE
delete mode 100644 vendor/github.com/pkg/profile/README.md
delete mode 100644 vendor/github.com/pkg/profile/mutex.go
delete mode 100644 vendor/github.com/pkg/profile/mutex17.go
delete mode 100644 vendor/github.com/pkg/profile/profile.go
delete mode 100644 vendor/github.com/pkg/profile/trace.go
delete mode 100644 vendor/github.com/pkg/profile/trace16.go
delete mode 100644 vendor/github.com/pkg/sftp/.gitignore
delete mode 100644 vendor/github.com/pkg/sftp/.travis.yml
delete mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS
delete mode 100644 vendor/github.com/pkg/sftp/LICENSE
delete mode 100644 vendor/github.com/pkg/sftp/README.md
delete mode 100644 vendor/github.com/pkg/sftp/attrs.go
delete mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go
delete mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go
delete mode 100644 vendor/github.com/pkg/sftp/client.go
delete mode 100644 vendor/github.com/pkg/sftp/conn.go
delete mode 100644 vendor/github.com/pkg/sftp/debug.go
delete mode 100644 vendor/github.com/pkg/sftp/match.go
delete mode 100644 vendor/github.com/pkg/sftp/packet-manager.go
delete mode 100644 vendor/github.com/pkg/sftp/packet-typing.go
delete mode 100644 vendor/github.com/pkg/sftp/packet.go
delete mode 100644 vendor/github.com/pkg/sftp/release.go
delete mode 100644 vendor/github.com/pkg/sftp/request-attrs.go
delete mode 100644 vendor/github.com/pkg/sftp/request-errors.go
delete mode 100644 vendor/github.com/pkg/sftp/request-example.go
delete mode 100644 vendor/github.com/pkg/sftp/request-interfaces.go
delete mode 100644 vendor/github.com/pkg/sftp/request-readme.md
delete mode 100644 vendor/github.com/pkg/sftp/request-server.go
delete mode 100644 vendor/github.com/pkg/sftp/request-unix.go
delete mode 100644 vendor/github.com/pkg/sftp/request.go
delete mode 100644 vendor/github.com/pkg/sftp/request_windows.go
delete mode 100644 vendor/github.com/pkg/sftp/server.go
delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go
delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go
delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go
delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go
delete mode 100644 vendor/github.com/pkg/sftp/server_stubs.go
delete mode 100644 vendor/github.com/pkg/sftp/server_unix.go
delete mode 100644 vendor/github.com/pkg/sftp/sftp.go
delete mode 100644 vendor/github.com/pkg/xattr/.gitignore
delete mode 100644 vendor/github.com/pkg/xattr/.travis.sh
delete mode 100644 vendor/github.com/pkg/xattr/.travis.yml
delete mode 100644 vendor/github.com/pkg/xattr/LICENSE
delete mode 100644 vendor/github.com/pkg/xattr/README.md
delete mode 100644 vendor/github.com/pkg/xattr/go.mod
delete mode 100644 vendor/github.com/pkg/xattr/xattr.go
delete mode 100644 vendor/github.com/pkg/xattr/xattr_bsd.go
delete mode 100644 vendor/github.com/pkg/xattr/xattr_darwin.go
delete mode 100644 vendor/github.com/pkg/xattr/xattr_linux.go
delete mode 100644 vendor/github.com/pkg/xattr/xattr_unsupported.go
delete mode 100644 vendor/github.com/restic/chunker/.travis.yml
delete mode 100644 vendor/github.com/restic/chunker/LICENSE
delete mode 100644 vendor/github.com/restic/chunker/README.md
delete mode 100644 vendor/github.com/restic/chunker/chunker.go
delete mode 100644 vendor/github.com/restic/chunker/doc.go
delete mode 100644 vendor/github.com/restic/chunker/go.mod
delete mode 100644 vendor/github.com/restic/chunker/polynomials.go
delete mode 100644 vendor/github.com/russross/blackfriday/.gitignore
delete mode 100644 vendor/github.com/russross/blackfriday/.travis.yml
delete mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt
delete mode 100644 vendor/github.com/russross/blackfriday/README.md
delete mode 100644 vendor/github.com/russross/blackfriday/block.go
delete mode 100644 vendor/github.com/russross/blackfriday/doc.go
delete mode 100644 vendor/github.com/russross/blackfriday/go.mod
delete mode 100644 vendor/github.com/russross/blackfriday/html.go
delete mode 100644 vendor/github.com/russross/blackfriday/inline.go
delete mode 100644 vendor/github.com/russross/blackfriday/latex.go
delete mode 100644 vendor/github.com/russross/blackfriday/markdown.go
delete mode 100644 vendor/github.com/russross/blackfriday/smartypants.go
delete mode 100644 vendor/github.com/satori/go.uuid/.travis.yml
delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE
delete mode 100644 vendor/github.com/satori/go.uuid/README.md
delete mode 100644 vendor/github.com/satori/go.uuid/codec.go
delete mode 100644 vendor/github.com/satori/go.uuid/generator.go
delete mode 100644 vendor/github.com/satori/go.uuid/sql.go
delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go
delete mode 100644 vendor/github.com/spf13/cobra/.gitignore
delete mode 100644 vendor/github.com/spf13/cobra/.mailmap
delete mode 100644 vendor/github.com/spf13/cobra/.travis.yml
delete mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
delete mode 100644 vendor/github.com/spf13/cobra/README.md
delete mode 100644 vendor/github.com/spf13/cobra/args.go
delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
delete mode 100644 vendor/github.com/spf13/cobra/cobra.go
delete mode 100644 vendor/github.com/spf13/cobra/command.go
delete mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
delete mode 100644 vendor/github.com/spf13/cobra/command_win.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.md
delete mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.md
delete mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.md
delete mode 100644 vendor/github.com/spf13/cobra/doc/util.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go
delete mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.md
delete mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
delete mode 100644 vendor/github.com/spf13/pflag/.gitignore
delete mode 100644 vendor/github.com/spf13/pflag/.travis.yml
delete mode 100644 vendor/github.com/spf13/pflag/LICENSE
delete mode 100644 vendor/github.com/spf13/pflag/README.md
delete mode 100644 vendor/github.com/spf13/pflag/bool.go
delete mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
delete mode 100644 vendor/github.com/spf13/pflag/bytes.go
delete mode 100644 vendor/github.com/spf13/pflag/count.go
delete mode 100644 vendor/github.com/spf13/pflag/duration.go
delete mode 100644 vendor/github.com/spf13/pflag/duration_slice.go
delete mode 100644 vendor/github.com/spf13/pflag/flag.go
delete mode 100644 vendor/github.com/spf13/pflag/float32.go
delete mode 100644 vendor/github.com/spf13/pflag/float64.go
delete mode 100644 vendor/github.com/spf13/pflag/golangflag.go
delete mode 100644 vendor/github.com/spf13/pflag/int.go
delete mode 100644 vendor/github.com/spf13/pflag/int16.go
delete mode 100644 vendor/github.com/spf13/pflag/int32.go
delete mode 100644 vendor/github.com/spf13/pflag/int64.go
delete mode 100644 vendor/github.com/spf13/pflag/int8.go
delete mode 100644 vendor/github.com/spf13/pflag/int_slice.go
delete mode 100644 vendor/github.com/spf13/pflag/ip.go
delete mode 100644 vendor/github.com/spf13/pflag/ip_slice.go
delete mode 100644 vendor/github.com/spf13/pflag/ipmask.go
delete mode 100644 vendor/github.com/spf13/pflag/ipnet.go
delete mode 100644 vendor/github.com/spf13/pflag/string.go
delete mode 100644 vendor/github.com/spf13/pflag/string_array.go
delete mode 100644 vendor/github.com/spf13/pflag/string_slice.go
delete mode 100644 vendor/github.com/spf13/pflag/string_to_int.go
delete mode 100644 vendor/github.com/spf13/pflag/string_to_string.go
delete mode 100644 vendor/github.com/spf13/pflag/uint.go
delete mode 100644 vendor/github.com/spf13/pflag/uint16.go
delete mode 100644 vendor/github.com/spf13/pflag/uint32.go
delete mode 100644 vendor/github.com/spf13/pflag/uint64.go
delete mode 100644 vendor/github.com/spf13/pflag/uint8.go
delete mode 100644 vendor/github.com/spf13/pflag/uint_slice.go
delete mode 100644 vendor/go.opencensus.io/.gitignore
delete mode 100644 vendor/go.opencensus.io/.travis.yml
delete mode 100644 vendor/go.opencensus.io/AUTHORS
delete mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md
delete mode 100644 vendor/go.opencensus.io/Gopkg.lock
delete mode 100644 vendor/go.opencensus.io/Gopkg.toml
delete mode 100644 vendor/go.opencensus.io/LICENSE
delete mode 100644 vendor/go.opencensus.io/Makefile
delete mode 100644 vendor/go.opencensus.io/README.md
delete mode 100644 vendor/go.opencensus.io/appveyor.yml
delete mode 100644 vendor/go.opencensus.io/go.mod
delete mode 100644 vendor/go.opencensus.io/go.sum
delete mode 100644 vendor/go.opencensus.io/internal/internal.go
delete mode 100644 vendor/go.opencensus.io/internal/sanitize.go
delete mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
delete mode 100644 vendor/go.opencensus.io/internal/traceinternals.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go
delete mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go
delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go
delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go
delete mode 100644 vendor/go.opencensus.io/opencensus.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go
delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
delete mode 100644 vendor/go.opencensus.io/resource/resource.go
delete mode 100644 vendor/go.opencensus.io/stats/doc.go
delete mode 100644 vendor/go.opencensus.io/stats/internal/record.go
delete mode 100644 vendor/go.opencensus.io/stats/measure.go
delete mode 100644 vendor/go.opencensus.io/stats/measure_float64.go
delete mode 100644 vendor/go.opencensus.io/stats/measure_int64.go
delete mode 100644 vendor/go.opencensus.io/stats/record.go
delete mode 100644 vendor/go.opencensus.io/stats/units.go
delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go
delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go
delete mode 100644 vendor/go.opencensus.io/stats/view/collector.go
delete mode 100644 vendor/go.opencensus.io/stats/view/doc.go
delete mode 100644 vendor/go.opencensus.io/stats/view/export.go
delete mode 100644 vendor/go.opencensus.io/stats/view/view.go
delete mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go
delete mode 100644 vendor/go.opencensus.io/stats/view/worker.go
delete mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go
delete mode 100644 vendor/go.opencensus.io/tag/context.go
delete mode 100644 vendor/go.opencensus.io/tag/doc.go
delete mode 100644 vendor/go.opencensus.io/tag/key.go
delete mode 100644 vendor/go.opencensus.io/tag/map.go
delete mode 100644 vendor/go.opencensus.io/tag/map_codec.go
delete mode 100644 vendor/go.opencensus.io/tag/profile_19.go
delete mode 100644 vendor/go.opencensus.io/tag/profile_not19.go
delete mode 100644 vendor/go.opencensus.io/tag/validate.go
delete mode 100644 vendor/go.opencensus.io/trace/basetypes.go
delete mode 100644 vendor/go.opencensus.io/trace/config.go
delete mode 100644 vendor/go.opencensus.io/trace/doc.go
delete mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go
delete mode 100644 vendor/go.opencensus.io/trace/export.go
delete mode 100644 vendor/go.opencensus.io/trace/internal/internal.go
delete mode 100644 vendor/go.opencensus.io/trace/lrumap.go
delete mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go
delete mode 100644 vendor/go.opencensus.io/trace/sampling.go
delete mode 100644 vendor/go.opencensus.io/trace/spanbucket.go
delete mode 100644 vendor/go.opencensus.io/trace/spanstore.go
delete mode 100644 vendor/go.opencensus.io/trace/status_codes.go
delete mode 100644 vendor/go.opencensus.io/trace/trace.go
delete mode 100644 vendor/go.opencensus.io/trace/trace_go11.go
delete mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go
delete mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go
delete mode 100644 vendor/golang.org/x/crypto/AUTHORS
delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/crypto/LICENSE
delete mode 100644 vendor/golang.org/x/crypto/PATENTS
delete mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go
delete mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go
delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go
delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go
delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s
delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/xor.go
delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go
delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_noasm.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go
delete mode 100644 vendor/golang.org/x/net/AUTHORS
delete mode 100644 vendor/golang.org/x/net/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/net/LICENSE
delete mode 100644 vendor/golang.org/x/net/PATENTS
delete mode 100644 vendor/golang.org/x/net/context/context.go
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
delete mode 100644 vendor/golang.org/x/net/context/go17.go
delete mode 100644 vendor/golang.org/x/net/context/go19.go
delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go
delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go
delete mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go
delete mode 100644 vendor/golang.org/x/net/http/httpguts/httplex.go
delete mode 100644 vendor/golang.org/x/net/http2/.gitignore
delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile
delete mode 100644 vendor/golang.org/x/net/http2/Makefile
delete mode 100644 vendor/golang.org/x/net/http2/README
delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go
delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go
delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go
delete mode 100644 vendor/golang.org/x/net/http2/errors.go
delete mode 100644 vendor/golang.org/x/net/http2/flow.go
delete mode 100644 vendor/golang.org/x/net/http2/frame.go
delete mode 100644 vendor/golang.org/x/net/http2/go111.go
delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go
delete mode 100644 vendor/golang.org/x/net/http2/headermap.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go
delete mode 100644 vendor/golang.org/x/net/http2/http2.go
delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go
delete mode 100644 vendor/golang.org/x/net/http2/pipe.go
delete mode 100644 vendor/golang.org/x/net/http2/server.go
delete mode 100644 vendor/golang.org/x/net/http2/transport.go
delete mode 100644 vendor/golang.org/x/net/http2/write.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go
delete mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go
delete mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go
delete mode 100644 vendor/golang.org/x/net/idna/punycode.go
delete mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go
delete mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go
delete mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go
delete mode 100644 vendor/golang.org/x/net/idna/trie.go
delete mode 100644 vendor/golang.org/x/net/idna/trieval.go
delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go
delete mode 100644 vendor/golang.org/x/net/publicsuffix/list.go
delete mode 100644 vendor/golang.org/x/net/publicsuffix/table.go
delete mode 100644 vendor/golang.org/x/net/trace/events.go
delete mode 100644 vendor/golang.org/x/net/trace/histogram.go
delete mode 100644 vendor/golang.org/x/net/trace/trace.go
delete mode 100644 vendor/golang.org/x/oauth2/.travis.yml
delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS
delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md
delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/oauth2/LICENSE
delete mode 100644 vendor/golang.org/x/oauth2/README.md
delete mode 100644 vendor/golang.org/x/oauth2/go.mod
delete mode 100644 vendor/golang.org/x/oauth2/go.sum
delete mode 100644 vendor/golang.org/x/oauth2/google/appengine.go
delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go
delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
delete mode 100644 vendor/golang.org/x/oauth2/google/default.go
delete mode 100644 vendor/golang.org/x/oauth2/google/doc.go
delete mode 100644 vendor/golang.org/x/oauth2/google/google.go
delete mode 100644 vendor/golang.org/x/oauth2/google/jwt.go
delete mode 100644 vendor/golang.org/x/oauth2/google/sdk.go
delete mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go
delete mode 100644 vendor/golang.org/x/oauth2/internal/doc.go
delete mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go
delete mode 100644 vendor/golang.org/x/oauth2/internal/token.go
delete mode 100644 vendor/golang.org/x/oauth2/internal/transport.go
delete mode 100644 vendor/golang.org/x/oauth2/jws/jws.go
delete mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go
delete mode 100644 vendor/golang.org/x/oauth2/oauth2.go
delete mode 100644 vendor/golang.org/x/oauth2/token.go
delete mode 100644 vendor/golang.org/x/oauth2/transport.go
delete mode 100644 vendor/golang.org/x/sync/AUTHORS
delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/sync/LICENSE
delete mode 100644 vendor/golang.org/x/sync/PATENTS
delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
delete mode 100644 vendor/golang.org/x/sys/AUTHORS
delete mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/sys/LICENSE
delete mode 100644 vendor/golang.org/x/sys/PATENTS
delete mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.c
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go
delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s
delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore
delete mode 100644 vendor/golang.org/x/sys/unix/README.md
delete mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
delete mode 100644 vendor/golang.org/x/sys/unix/aliases.go
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/constants.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dirent.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_big.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_little.go delete mode 100644 vendor/golang.org/x/sys/unix/env_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ioctl.go delete mode 100644 vendor/golang.org/x/sys/unix/mkall.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100644 vendor/golang.org/x/sys/unix/openbsd_pledge.go delete mode 
100644 vendor/golang.org/x/sys/unix/openbsd_unveil.go delete mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/race.go delete mode 100644 vendor/golang.org/x/sys/unix/race0.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/str.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/timestruct.go delete mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zptrace386_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptracearm_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptracemips_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zptracemipsle_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 
vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_arm.s delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash delete mode 100644 vendor/golang.org/x/sys/windows/mkerrors.go delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/race.go delete mode 100644 vendor/golang.org/x/sys/windows/race0.go delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/service.go delete mode 100644 vendor/golang.org/x/sys/windows/str.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/LICENSE delete mode 100644 vendor/golang.org/x/text/PATENTS delete mode 100644 vendor/golang.org/x/text/encoding/encoding.go delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go delete mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go delete mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go delete mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go delete mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go delete mode 100644 vendor/golang.org/x/text/runes/cond.go delete mode 100644 vendor/golang.org/x/text/runes/runes.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go delete mode 100644 vendor/golang.org/x/text/transform/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 
vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 vendor/google.golang.org/api/AUTHORS delete mode 100644 vendor/google.golang.org/api/CONTRIBUTORS delete mode 100644 vendor/google.golang.org/api/LICENSE delete mode 100644 vendor/google.golang.org/api/gensupport/backoff.go delete mode 100644 vendor/google.golang.org/api/gensupport/buffer.go delete mode 100644 vendor/google.golang.org/api/gensupport/doc.go delete mode 100644 vendor/google.golang.org/api/gensupport/header.go delete mode 100644 vendor/google.golang.org/api/gensupport/json.go delete mode 100644 vendor/google.golang.org/api/gensupport/jsonfloat.go delete mode 100644 vendor/google.golang.org/api/gensupport/media.go delete mode 100644 vendor/google.golang.org/api/gensupport/params.go delete mode 100644 vendor/google.golang.org/api/gensupport/resumable.go delete mode 100644 vendor/google.golang.org/api/gensupport/retry.go delete mode 100644 vendor/google.golang.org/api/gensupport/send.go delete mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go delete mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go delete mode 100644 vendor/google.golang.org/api/googleapi/types.go delete mode 100644 vendor/google.golang.org/api/internal/creds.go delete mode 100644 vendor/google.golang.org/api/internal/pool.go delete mode 100644 vendor/google.golang.org/api/internal/service-account.json delete mode 100644 vendor/google.golang.org/api/internal/settings.go delete mode 100644 vendor/google.golang.org/api/option/credentials_go19.go delete mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go delete mode 100644 vendor/google.golang.org/api/option/option.go delete mode 100644 vendor/google.golang.org/api/storage/v1/storage-api.json delete mode 100644 vendor/google.golang.org/api/storage/v1/storage-gen.go delete mode 100644 vendor/google.golang.org/api/transport/http/dial.go delete mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go delete mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/appengine/LICENSE delete mode 100644 vendor/google.golang.org/appengine/README.md delete mode 100644 vendor/google.golang.org/appengine/appengine.go delete mode 100644 vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 vendor/google.golang.org/appengine/errors.go delete mode 100644 vendor/google.golang.org/appengine/go.mod delete mode 100644 vendor/google.golang.org/appengine/go.sum delete mode 100644 vendor/google.golang.org/appengine/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 
vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100644 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto delete mode 100644 vendor/google.golang.org/appengine/namespace.go delete mode 100644 vendor/google.golang.org/appengine/timeout.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh delete mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/Makefile delete mode 100644 vendor/google.golang.org/grpc/README.md delete mode 100644 vendor/google.golang.org/grpc/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go delete mode 100644 
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go delete mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go delete mode 100644 vendor/google.golang.org/grpc/call.go delete mode 100644 vendor/google.golang.org/grpc/clientconn.go delete mode 100644 vendor/google.golang.org/grpc/codec.go delete mode 100644 vendor/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go delete mode 100644 vendor/google.golang.org/grpc/credentials/tls13.go delete mode 100644 vendor/google.golang.org/grpc/dialoptions.go delete mode 100644 vendor/google.golang.org/grpc/doc.go delete mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go delete mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go delete mode 100644 vendor/google.golang.org/grpc/go.mod delete mode 100644 vendor/google.golang.org/grpc/go.sum delete mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh delete mode 100644 vendor/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go delete mode 100644 vendor/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go delete mode 100644 
vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go delete mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go delete mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/pickfirst.go delete mode 100644 vendor/google.golang.org/grpc/proxy.go delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go delete mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/google.golang.org/grpc/server.go delete mode 100644 vendor/google.golang.org/grpc/service_config.go delete mode 100644 vendor/google.golang.org/grpc/stats/handlers.go delete mode 100644 vendor/google.golang.org/grpc/stats/stats.go delete mode 100644 vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/grpc/stream.go delete mode 100644 vendor/google.golang.org/grpc/tap/tap.go delete mode 100644 vendor/google.golang.org/grpc/trace.go delete mode 100644 vendor/google.golang.org/grpc/version.go delete mode 100644 vendor/google.golang.org/grpc/vet.sh delete mode 100644 vendor/gopkg.in/ini.v1/.gitignore delete mode 100644 vendor/gopkg.in/ini.v1/.travis.yml delete mode 100644 vendor/gopkg.in/ini.v1/LICENSE delete mode 100644 vendor/gopkg.in/ini.v1/Makefile delete mode 100644 vendor/gopkg.in/ini.v1/README.md delete mode 100644 vendor/gopkg.in/ini.v1/error.go delete mode 100644 vendor/gopkg.in/ini.v1/file.go delete mode 100644 vendor/gopkg.in/ini.v1/ini.go delete mode 100644 vendor/gopkg.in/ini.v1/key.go delete mode 100644 vendor/gopkg.in/ini.v1/parser.go delete mode 100644 vendor/gopkg.in/ini.v1/section.go delete mode 100644 vendor/gopkg.in/ini.v1/struct.go delete mode 100644 vendor/gopkg.in/tomb.v2/LICENSE delete mode 100644 vendor/gopkg.in/tomb.v2/README.md delete mode 100644 vendor/gopkg.in/tomb.v2/context.go delete mode 100644 vendor/gopkg.in/tomb.v2/context16.go delete mode 100644 vendor/gopkg.in/tomb.v2/tomb.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml delete mode 100644 vendor/gopkg.in/yaml.v2/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v2/README.md delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go delete mode 100644 
vendor/gopkg.in/yaml.v2/decode.go delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go delete mode 100644 vendor/modules.txt diff --git a/vendor/bazil.org/fuse/.gitattributes b/vendor/bazil.org/fuse/.gitattributes deleted file mode 100644 index b65f2a9ff..000000000 --- a/vendor/bazil.org/fuse/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go filter=gofmt -*.cgo filter=gofmt diff --git a/vendor/bazil.org/fuse/.gitignore b/vendor/bazil.org/fuse/.gitignore deleted file mode 100644 index 53589948c..000000000 --- a/vendor/bazil.org/fuse/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -*~ -.#* -## the next line needs to start with a backslash to avoid looking like -## a comment -\#*# -.*.swp - -*.test - -/clockfs -/hellofs diff --git a/vendor/bazil.org/fuse/LICENSE b/vendor/bazil.org/fuse/LICENSE deleted file mode 100644 index 4ac7cd838..000000000 --- a/vendor/bazil.org/fuse/LICENSE +++ /dev/null @@ -1,93 +0,0 @@ -Copyright (c) 2013-2015 Tommi Virtanen. -Copyright (c) 2009, 2011, 2012 The Go Authors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -The following included software components have additional copyright -notices and license terms that may differ from the above. - - -File fuse.go: - -// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, -// which carries this notice: -// -// The files in this directory are subject to the following license. -// -// The author of this software is Russ Cox. 
-// -// Copyright (c) 2006 Russ Cox -// -// Permission to use, copy, modify, and distribute this software for any -// purpose without fee is hereby granted, provided that this entire notice -// is included in all copies of any software which is or includes a copy -// or modification of this software and in all copies of the supporting -// documentation for such software. -// -// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED -// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY -// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS -// FITNESS FOR ANY PARTICULAR PURPOSE. - - -File fuse_kernel.go: - -// Derived from FUSE's fuse_kernel.h -/* - This file defines the kernel interface of FUSE - Copyright (C) 2001-2007 Miklos Szeredi - - - This -- and only this -- header file may also be distributed under - the terms of the BSD Licence as follows: - - Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - SUCH DAMAGE. -*/ diff --git a/vendor/bazil.org/fuse/README.md b/vendor/bazil.org/fuse/README.md deleted file mode 100644 index 8c6d556ee..000000000 --- a/vendor/bazil.org/fuse/README.md +++ /dev/null @@ -1,23 +0,0 @@ -bazil.org/fuse -- Filesystems in Go -=================================== - -`bazil.org/fuse` is a Go library for writing FUSE userspace -filesystems. - -It is a from-scratch implementation of the kernel-userspace -communication protocol, and does not use the C library from the -project called FUSE. `bazil.org/fuse` embraces Go fully for safety and -ease of programming. - -Here’s how to get going: - - go get bazil.org/fuse - -Website: http://bazil.org/fuse/ - -Github repository: https://github.com/bazil/fuse - -API docs: http://godoc.org/bazil.org/fuse - -Our thanks to Russ Cox for his fuse library, which this project is -based on. diff --git a/vendor/bazil.org/fuse/buffer.go b/vendor/bazil.org/fuse/buffer.go deleted file mode 100644 index bb1d2b776..000000000 --- a/vendor/bazil.org/fuse/buffer.go +++ /dev/null @@ -1,35 +0,0 @@ -package fuse - -import "unsafe" - -// buffer provides a mechanism for constructing a message from -// multiple segments. -type buffer []byte - -// alloc allocates size bytes and returns a pointer to the new -// segment. 
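Two details of the allocator below are easy to miss: newBuffer pre-reserves room for the kernel reply header, and alloc grows the slice to 2*cap+size, so repeated segment allocations stay amortized O(1) while each request is guaranteed to fit in a single reallocation. A rough usage sketch (a fragment from a hypothetical caller inside package fuse; attrOut is one of the kernel reply structs, and the field use is illustrative):

	// Fragment; assumes package fuse and its unsafe import.
	buf := newBuffer(unsafe.Sizeof(attrOut{}))              // header space reserved up front
	out := (*attrOut)(buf.alloc(unsafe.Sizeof(attrOut{})))  // append one segment, get a pointer into buf
	out.AttrValid = 1                                       // fill the segment in place (illustrative field)
	// buf now holds outHeader followed by attrOut, ready to write to the kernel.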
-func (w *buffer) alloc(size uintptr) unsafe.Pointer {
-	s := int(size)
-	if len(*w)+s > cap(*w) {
-		old := *w
-		*w = make([]byte, len(*w), 2*cap(*w)+s)
-		copy(*w, old)
-	}
-	l := len(*w)
-	*w = (*w)[:l+s]
-	return unsafe.Pointer(&(*w)[l])
-}
-
-// reset clears out the contents of the buffer.
-func (w *buffer) reset() {
-	for i := range (*w)[:cap(*w)] {
-		(*w)[i] = 0
-	}
-	*w = (*w)[:0]
-}
-
-func newBuffer(extra uintptr) buffer {
-	const hdrSize = unsafe.Sizeof(outHeader{})
-	buf := make(buffer, hdrSize, hdrSize+extra)
-	return buf
-}
diff --git a/vendor/bazil.org/fuse/debug.go b/vendor/bazil.org/fuse/debug.go
deleted file mode 100644
index be9f900d5..000000000
--- a/vendor/bazil.org/fuse/debug.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package fuse
-
-import (
-	"runtime"
-)
-
-func stack() string {
-	buf := make([]byte, 1024)
-	return string(buf[:runtime.Stack(buf, false)])
-}
-
-func nop(msg interface{}) {}
-
-// Debug is called to output debug messages, including protocol
-// traces. The default behavior is to do nothing.
-//
-// The messages have human-friendly string representations and are
-// safe to marshal to JSON.
-//
-// Implementations must not retain msg.
-var Debug func(msg interface{}) = nop
diff --git a/vendor/bazil.org/fuse/error_darwin.go b/vendor/bazil.org/fuse/error_darwin.go
deleted file mode 100644
index a3fb89ca2..000000000
--- a/vendor/bazil.org/fuse/error_darwin.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package fuse
-
-import (
-	"syscall"
-)
-
-const (
-	ENOATTR = Errno(syscall.ENOATTR)
-)
-
-const (
-	errNoXattr = ENOATTR
-)
-
-func init() {
-	errnoNames[errNoXattr] = "ENOATTR"
-}
diff --git a/vendor/bazil.org/fuse/error_freebsd.go b/vendor/bazil.org/fuse/error_freebsd.go
deleted file mode 100644
index c6ea6d6e7..000000000
--- a/vendor/bazil.org/fuse/error_freebsd.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package fuse
-
-import "syscall"
-
-const (
-	ENOATTR = Errno(syscall.ENOATTR)
-)
-
-const (
-	errNoXattr = ENOATTR
-)
-
-func init() {
-	errnoNames[errNoXattr] = "ENOATTR"
-}
diff --git a/vendor/bazil.org/fuse/error_linux.go b/vendor/bazil.org/fuse/error_linux.go
deleted file mode 100644
index 6f113e71e..000000000
--- a/vendor/bazil.org/fuse/error_linux.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package fuse
-
-import (
-	"syscall"
-)
-
-const (
-	ENODATA = Errno(syscall.ENODATA)
-)
-
-const (
-	errNoXattr = ENODATA
-)
-
-func init() {
-	errnoNames[errNoXattr] = "ENODATA"
-}
diff --git a/vendor/bazil.org/fuse/error_std.go b/vendor/bazil.org/fuse/error_std.go
deleted file mode 100644
index 398f43fbf..000000000
--- a/vendor/bazil.org/fuse/error_std.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package fuse
-
-// There is very little commonality in extended attribute errors
-// across platforms.
-//
-// The getxattr return value for "extended attribute does not exist" is
-// ENOATTR on OS X, and ENODATA on Linux and apparently at least
-// NetBSD. There may be a #define ENOATTR on Linux too, but the value
-// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
-// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
-// ENODATA exists but is only used for STREAMs.
-//
-// Each platform defines an errNoXattr constant, and this file
-// enforces that it implements the right interfaces and hides the
-// implementation.
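In practice a filesystem never touches the platform constants directly; it returns the portable ErrNoXattr (defined after the reference links below) and lets each platform's errNoXattr decide whether the kernel sees ENOATTR or ENODATA. A sketch of a Getxattr implementation against this API; the xattrNode type and its map are illustrative, not part of the library, and package/imports (bazil.org/fuse, golang.org/x/net/context) are elided:

	type xattrNode struct {
		xattrs map[string][]byte // illustrative in-memory attribute store
	}

	func (n *xattrNode) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
		val, ok := n.xattrs[req.Name]
		if !ok {
			return fuse.ErrNoXattr // surfaces as ENOATTR or ENODATA depending on platform
		}
		resp.Xattr = val
		return nil
	}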
-// -// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html -// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html -// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html -// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html -// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2 -// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html - -// ErrNoXattr is a platform-independent error value meaning the -// extended attribute was not found. It can be used to respond to -// GetxattrRequest and such. -const ErrNoXattr = errNoXattr - -var _ error = ErrNoXattr -var _ Errno = ErrNoXattr -var _ ErrorNumber = ErrNoXattr diff --git a/vendor/bazil.org/fuse/fs/serve.go b/vendor/bazil.org/fuse/fs/serve.go deleted file mode 100644 index e9fc56590..000000000 --- a/vendor/bazil.org/fuse/fs/serve.go +++ /dev/null @@ -1,1568 +0,0 @@ -// FUSE service loop, for servers that wish to use it. - -package fs // import "bazil.org/fuse/fs" - -import ( - "encoding/binary" - "fmt" - "hash/fnv" - "io" - "log" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" -) - -import ( - "bytes" - - "bazil.org/fuse" - "bazil.org/fuse/fuseutil" -) - -const ( - attrValidTime = 1 * time.Minute - entryValidTime = 1 * time.Minute -) - -// TODO: FINISH DOCS - -// An FS is the interface required of a file system. -// -// Other FUSE requests can be handled by implementing methods from the -// FS* interfaces, for example FSStatfser. -type FS interface { - // Root is called to obtain the Node for the file system root. - Root() (Node, error) -} - -type FSStatfser interface { - // Statfs is called to obtain file system metadata. - // It should write that data to resp. - Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error -} - -type FSDestroyer interface { - // Destroy is called when the file system is shutting down. - // - // Linux only sends this request for block device backed (fuseblk) - // filesystems, to allow them to flush writes to disk before the - // unmount completes. - Destroy() -} - -type FSInodeGenerator interface { - // GenerateInode is called to pick a dynamic inode number when it - // would otherwise be 0. - // - // Not all filesystems bother tracking inodes, but FUSE requires - // the inode to be set, and fewer duplicates in general makes UNIX - // tools work better. - // - // Operations where the nodes may return 0 inodes include Getattr, - // Setattr and ReadDir. - // - // If FS does not implement FSInodeGenerator, GenerateDynamicInode - // is used. - // - // Implementing this is useful to e.g. constrain the range of - // inode values used for dynamic inodes. - GenerateInode(parentInode uint64, name string) uint64 -} - -// A Node is the interface required of a file or directory. -// See the documentation for type FS for general information -// pertaining to all methods. -// -// A Node must be usable as a map key, that is, it cannot be a -// function, map or slice. -// -// Other FUSE requests can be handled by implementing methods from the -// Node* interfaces, for example NodeOpener. -// -// Methods returning Node should take care to return the same Node -// when the result is logically the same instance. Without this, each -// Node will get a new NodeID, causing spurious cache invalidations, -// extra lookups and aliasing anomalies. This may not matter for a -// simple, read-only filesystem. 
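Everything needed for a working read-only filesystem is now on the table: an FS providing Root, Nodes describing metadata and lookup, and (further down) Handles serving reads. A minimal end-to-end sketch against this API; the names FS, Dir, File and the mountpoint are illustrative:

	package main

	import (
		"log"
		"os"

		"bazil.org/fuse"
		"bazil.org/fuse/fs"
		"golang.org/x/net/context"
	)

	type FS struct{}

	// Root makes Dir the filesystem root.
	func (FS) Root() (fs.Node, error) { return Dir{}, nil }

	type Dir struct{}

	// Attr marks the root as a world-readable directory.
	func (Dir) Attr(ctx context.Context, a *fuse.Attr) error {
		a.Inode = 1
		a.Mode = os.ModeDir | 0555
		return nil
	}

	// Lookup resolves the single name this filesystem knows about.
	func (Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
		if name == "hello" {
			return File{}, nil
		}
		return nil, fuse.ENOENT
	}

	// ReadDirAll lets directory listing see the file.
	func (Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
		return []fuse.Dirent{{Inode: 2, Name: "hello", Type: fuse.DT_File}}, nil
	}

	type File struct{}

	func (File) Attr(ctx context.Context, a *fuse.Attr) error {
		a.Inode = 2
		a.Mode = 0444
		a.Size = uint64(len("hello, world\n"))
		return nil
	}

	// ReadAll satisfies HandleReadAller; since File implements no Open,
	// the open always succeeds and the Node doubles as its own Handle.
	func (File) ReadAll(ctx context.Context) ([]byte, error) {
		return []byte("hello, world\n"), nil
	}

	func main() {
		c, err := fuse.Mount("/mnt/hello") // mountpoint is illustrative
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()
		if err := fs.Serve(c, FS{}); err != nil {
			log.Fatal(err)
		}
	}

Serve, defined later in this file, drives the request loop until the connection is unmounted or fails.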
-type Node interface {
-	// Attr fills attr with the standard metadata for the node.
-	//
-	// Fields with reasonable defaults are prepopulated. For example,
-	// all times are set to a fixed moment when the program started.
-	//
-	// If Inode is left as 0, a dynamic inode number is chosen.
-	//
-	// The result may be cached for the duration set in Valid.
-	Attr(ctx context.Context, attr *fuse.Attr) error
-}
-
-type NodeGetattrer interface {
-	// Getattr obtains the standard metadata for the receiver.
-	// It should store that metadata in resp.
-	//
-	// If this method is not implemented, the attributes will be
-	// generated based on Attr(), with zero values filled in.
-	Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) error
-}
-
-type NodeSetattrer interface {
-	// Setattr sets the standard metadata for the receiver.
-	//
-	// Note, this is also used to communicate changes in the size of
-	// the file, outside of Writes.
-	//
-	// req.Valid is a bitmask of what fields are actually being set.
-	// For example, the method should not change the mode of the file
-	// unless req.Valid.Mode() is true.
-	Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error
-}
-
-type NodeSymlinker interface {
-	// Symlink creates a new symbolic link in the receiver, which must be a directory.
-	//
-	// TODO is the above true about directories?
-	Symlink(ctx context.Context, req *fuse.SymlinkRequest) (Node, error)
-}
-
-// This optional request will be called only for symbolic link nodes.
-type NodeReadlinker interface {
-	// Readlink reads a symbolic link.
-	Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error)
-}
-
-type NodeLinker interface {
-	// Link creates a new directory entry in the receiver based on an
-	// existing Node. Receiver must be a directory.
-	Link(ctx context.Context, req *fuse.LinkRequest, old Node) (Node, error)
-}
-
-type NodeRemover interface {
-	// Remove removes the entry with the given name from
-	// the receiver, which must be a directory. The entry to be removed
-	// may correspond to a file (unlink) or to a directory (rmdir).
-	Remove(ctx context.Context, req *fuse.RemoveRequest) error
-}
-
-type NodeAccesser interface {
-	// Access checks whether the calling context has permission for
-	// the given operations on the receiver. If so, Access should
-	// return nil. If not, Access should return EPERM.
-	//
-	// Note that this call affects the result of the access(2) system
-	// call but not the open(2) system call. If Access is not
-	// implemented, the Node behaves as if it always returns nil
-	// (permission granted), relying on checks in Open instead.
-	Access(ctx context.Context, req *fuse.AccessRequest) error
-}
-
-type NodeStringLookuper interface {
-	// Lookup looks up a specific entry in the receiver,
-	// which must be a directory. Lookup should return a Node
-	// corresponding to the entry. If the name does not exist in
-	// the directory, Lookup should return ENOENT.
-	//
-	// Lookup need not handle the names "." and "..".
-	Lookup(ctx context.Context, name string) (Node, error)
-}
-
-type NodeRequestLookuper interface {
-	// Lookup looks up a specific entry in the receiver.
-	// See NodeStringLookuper for more.
-	Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (Node, error)
-}
-
-type NodeMkdirer interface {
-	Mkdir(ctx context.Context, req *fuse.MkdirRequest) (Node, error)
-}
-
-type NodeOpener interface {
-	// Open opens the receiver. After a successful open, a client
-	// process has a file descriptor referring to this Handle.
-	//
-	// Open can also be called on non-files. For example,
-	// directories are Opened for ReadDir or fchdir(2).
-	//
-	// If this method is not implemented, the open will always
-	// succeed, and the Node itself will be used as the Handle.
-	//
-	// XXX note about access. XXX OpenFlags.
-	Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (Handle, error)
-}
-
-type NodeCreater interface {
-	// Create creates a new directory entry in the receiver, which
-	// must be a directory.
-	Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (Node, Handle, error)
-}
-
-type NodeForgetter interface {
-	// Forget about this node. This node will not receive further
-	// method calls.
-	//
-	// Forget is not necessarily seen on unmount, as all nodes are
-	// implicitly forgotten as part of the unmount.
-	Forget()
-}
-
-type NodeRenamer interface {
-	Rename(ctx context.Context, req *fuse.RenameRequest, newDir Node) error
-}
-
-type NodeMknoder interface {
-	Mknod(ctx context.Context, req *fuse.MknodRequest) (Node, error)
-}
-
-// TODO this should be on Handle not Node
-type NodeFsyncer interface {
-	Fsync(ctx context.Context, req *fuse.FsyncRequest) error
-}
-
-type NodeGetxattrer interface {
-	// Getxattr gets an extended attribute by the given name from the
-	// node.
-	//
-	// If there is no xattr by that name, returns fuse.ErrNoXattr.
-	Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error
-}
-
-type NodeListxattrer interface {
-	// Listxattr lists the extended attributes recorded for the node.
-	Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error
-}
-
-type NodeSetxattrer interface {
-	// Setxattr sets an extended attribute with the given name and
-	// value for the node.
-	Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
-}
-
-type NodeRemovexattrer interface {
-	// Removexattr removes an extended attribute for the given name.
-	//
-	// If there is no xattr by that name, returns fuse.ErrNoXattr.
-	Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error
-}
-
-var startTime = time.Now()
-
-func nodeAttr(ctx context.Context, n Node, attr *fuse.Attr) error {
-	attr.Valid = attrValidTime
-	attr.Nlink = 1
-	attr.Atime = startTime
-	attr.Mtime = startTime
-	attr.Ctime = startTime
-	attr.Crtime = startTime
-	if err := n.Attr(ctx, attr); err != nil {
-		return err
-	}
-	return nil
-}
-
-// A Handle is the interface required of an opened file or directory.
-// See the documentation for type FS for general information
-// pertaining to all methods.
-//
-// Other FUSE requests can be handled by implementing methods from the
-// Handle* interfaces. The most common to implement are HandleReader,
-// HandleReadDirer, and HandleWriter.
-//
-// TODO implement methods: Getlk, Setlk, Setlkw
-type Handle interface {
-}
-
-type HandleFlusher interface {
-	// Flush is called each time the file or directory is closed.
-	// Because there can be multiple file descriptors referring to a
-	// single opened file, Flush can be called multiple times.
-	Flush(ctx context.Context, req *fuse.FlushRequest) error
-}
-
-type HandleReadAller interface {
-	ReadAll(ctx context.Context) ([]byte, error)
-}
-
-type HandleReadDirAller interface {
-	ReadDirAll(ctx context.Context) ([]fuse.Dirent, error)
-}
-
-type HandleReader interface {
-	// Read requests to read data from the handle.
- // - // There is a page cache in the kernel that normally submits only - // page-aligned reads spanning one or more pages. However, you - // should not rely on this. To see individual requests as - // submitted by the file system clients, set OpenDirectIO. - // - // Note that reads beyond the size of the file as reported by Attr - // are not even attempted (except in OpenDirectIO mode). - Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error -} - -type HandleWriter interface { - // Write requests to write data into the handle at the given offset. - // Store the amount of data written in resp.Size. - // - // There is a writeback page cache in the kernel that normally submits - // only page-aligned writes spanning one or more pages. However, - // you should not rely on this. To see individual requests as - // submitted by the file system clients, set OpenDirectIO. - // - // Writes that grow the file are expected to update the file size - // (as seen through Attr). Note that file size changes are - // communicated also through Setattr. - Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error -} - -type HandleReleaser interface { - Release(ctx context.Context, req *fuse.ReleaseRequest) error -} - -type Config struct { - // Function to send debug log messages to. If nil, use fuse.Debug. - // Note that changing this or fuse.Debug may not affect existing - // calls to Serve. - // - // See fuse.Debug for the rules that log functions must follow. - Debug func(msg interface{}) - - // Function to put things into context for processing the request. - // The returned context must have ctx as its parent. - // - // Note that changing this may not affect existing calls to Serve. - // - // Must not retain req. - WithContext func(ctx context.Context, req fuse.Request) context.Context -} - -// New returns a new FUSE server ready to serve this kernel FUSE -// connection. -// -// Config may be nil. -func New(conn *fuse.Conn, config *Config) *Server { - s := &Server{ - conn: conn, - req: map[fuse.RequestID]*serveRequest{}, - nodeRef: map[Node]fuse.NodeID{}, - dynamicInode: GenerateDynamicInode, - } - if config != nil { - s.debug = config.Debug - s.context = config.WithContext - } - if s.debug == nil { - s.debug = fuse.Debug - } - return s -} - -type Server struct { - // set in New - conn *fuse.Conn - debug func(msg interface{}) - context func(ctx context.Context, req fuse.Request) context.Context - - // set once at Serve time - fs FS - dynamicInode func(parent uint64, name string) uint64 - - // state, protected by meta - meta sync.Mutex - req map[fuse.RequestID]*serveRequest - node []*serveNode - nodeRef map[Node]fuse.NodeID - handle []*serveHandle - freeNode []fuse.NodeID - freeHandle []fuse.HandleID - nodeGen uint64 - - // Used to ensure worker goroutines finish before Serve returns - wg sync.WaitGroup -} - -// Serve serves the FUSE connection by making calls to the methods -// of fs and the Nodes and Handles it makes available. It returns only -// when the connection has been closed or an unexpected error occurs. -func (s *Server) Serve(fs FS) error { - defer s.wg.Wait() // Wait for worker goroutines to complete before return - - s.fs = fs - if dyn, ok := fs.(FSInodeGenerator); ok { - s.dynamicInode = dyn.GenerateInode - } - - root, err := fs.Root() - if err != nil { - return fmt.Errorf("cannot obtain root node: %v", err) - } - // Recognize the root node if it's ever returned from Lookup, - // passed to Invalidate, etc. 
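	// (Aside: the bookkeeping below reserves index 0 so that NodeID 0
	// stays invalid and the root lives at NodeID 1. nodeRef is keyed by
	// the Node value itself, which is why saveNode, further down, hands
	// back the same NodeID whenever Lookup returns the identical Go
	// value; that is the behavior the deprecation note on NodeRef
	// relies on.)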
- s.nodeRef[root] = 1 - s.node = append(s.node, nil, &serveNode{ - inode: 1, - generation: s.nodeGen, - node: root, - refs: 1, - }) - s.handle = append(s.handle, nil) - - for { - req, err := s.conn.ReadRequest() - if err != nil { - if err == io.EOF { - break - } - return err - } - - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.serve(req) - }() - } - return nil -} - -// Serve serves a FUSE connection with the default settings. See -// Server.Serve. -func Serve(c *fuse.Conn, fs FS) error { - server := New(c, nil) - return server.Serve(fs) -} - -type nothing struct{} - -type serveRequest struct { - Request fuse.Request - cancel func() -} - -type serveNode struct { - inode uint64 - generation uint64 - node Node - refs uint64 - - // Delay freeing the NodeID until waitgroup is done. This allows - // using the NodeID for short periods of time without holding the - // Server.meta lock. - // - // Rules: - // - // - hold Server.meta while calling wg.Add, then unlock - // - do NOT try to reacquire Server.meta - wg sync.WaitGroup -} - -func (sn *serveNode) attr(ctx context.Context, attr *fuse.Attr) error { - err := nodeAttr(ctx, sn.node, attr) - if attr.Inode == 0 { - attr.Inode = sn.inode - } - return err -} - -type serveHandle struct { - handle Handle - readData []byte - nodeID fuse.NodeID -} - -// NodeRef is deprecated. It remains here to decrease code churn on -// FUSE library users. You may remove it from your program now; -// returning the same Node values is now recognized automatically, -// without needing NodeRef. -type NodeRef struct{} - -func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) { - c.meta.Lock() - defer c.meta.Unlock() - - if id, ok := c.nodeRef[node]; ok { - sn := c.node[id] - sn.refs++ - return id, sn.generation - } - - sn := &serveNode{inode: inode, node: node, refs: 1} - if n := len(c.freeNode); n > 0 { - id = c.freeNode[n-1] - c.freeNode = c.freeNode[:n-1] - c.node[id] = sn - c.nodeGen++ - } else { - id = fuse.NodeID(len(c.node)) - c.node = append(c.node, sn) - } - sn.generation = c.nodeGen - c.nodeRef[node] = id - return id, sn.generation -} - -func (c *Server) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) { - c.meta.Lock() - shandle := &serveHandle{handle: handle, nodeID: nodeID} - if n := len(c.freeHandle); n > 0 { - id = c.freeHandle[n-1] - c.freeHandle = c.freeHandle[:n-1] - c.handle[id] = shandle - } else { - id = fuse.HandleID(len(c.handle)) - c.handle = append(c.handle, shandle) - } - c.meta.Unlock() - return -} - -type nodeRefcountDropBug struct { - N uint64 - Refs uint64 - Node fuse.NodeID -} - -func (n *nodeRefcountDropBug) String() string { - return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node) -} - -func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) { - c.meta.Lock() - defer c.meta.Unlock() - snode := c.node[id] - - if snode == nil { - // this should only happen if refcounts kernel<->us disagree - // *and* two ForgetRequests for the same node race each other; - // this indicates a bug somewhere - c.debug(nodeRefcountDropBug{N: n, Node: id}) - - // we may end up triggering Forget twice, but that's better - // than not even once, and that's the best we can do - return true - } - - if n > snode.refs { - c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id}) - n = snode.refs - } - - snode.refs -= n - if snode.refs == 0 { - snode.wg.Wait() - c.node[id] = nil - delete(c.nodeRef, snode.node) - c.freeNode = append(c.freeNode, id) - return true - } - return
false -} - -func (c *Server) dropHandle(id fuse.HandleID) { - c.meta.Lock() - c.handle[id] = nil - c.freeHandle = append(c.freeHandle, id) - c.meta.Unlock() -} - -type missingHandle struct { - Handle fuse.HandleID - MaxHandle fuse.HandleID -} - -func (m missingHandle) String() string { - return fmt.Sprint("missing handle: ", m.Handle, m.MaxHandle) -} - -// Returns nil for invalid handles. -func (c *Server) getHandle(id fuse.HandleID) (shandle *serveHandle) { - c.meta.Lock() - defer c.meta.Unlock() - if id < fuse.HandleID(len(c.handle)) { - shandle = c.handle[uint(id)] - } - if shandle == nil { - c.debug(missingHandle{ - Handle: id, - MaxHandle: fuse.HandleID(len(c.handle)), - }) - } - return -} - -type request struct { - Op string - Request *fuse.Header - In interface{} `json:",omitempty"` -} - -func (r request) String() string { - return fmt.Sprintf("<- %s", r.In) -} - -type logResponseHeader struct { - ID fuse.RequestID -} - -func (m logResponseHeader) String() string { - return fmt.Sprintf("ID=%v", m.ID) -} - -type response struct { - Op string - Request logResponseHeader - Out interface{} `json:",omitempty"` - // Errno contains the errno value as a string, for example "EPERM". - Errno string `json:",omitempty"` - // Error may contain a free form error message. - Error string `json:",omitempty"` -} - -func (r response) errstr() string { - s := r.Errno - if r.Error != "" { - // prefix the errno constant to the long form message - s = s + ": " + r.Error - } - return s -} - -func (r response) String() string { - switch { - case r.Errno != "" && r.Out != nil: - return fmt.Sprintf("-> [%v] %v error=%s", r.Request, r.Out, r.errstr()) - case r.Errno != "": - return fmt.Sprintf("-> [%v] %s error=%s", r.Request, r.Op, r.errstr()) - case r.Out != nil: - // make sure (seemingly) empty values are readable - switch r.Out.(type) { - case string: - return fmt.Sprintf("-> [%v] %s %q", r.Request, r.Op, r.Out) - case []byte: - return fmt.Sprintf("-> [%v] %s [% x]", r.Request, r.Op, r.Out) - default: - return fmt.Sprintf("-> [%v] %v", r.Request, r.Out) - } - default: - return fmt.Sprintf("-> [%v] %s", r.Request, r.Op) - } -} - -type notification struct { - Op string - Node fuse.NodeID - Out interface{} `json:",omitempty"` - Err string `json:",omitempty"` -} - -func (n notification) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "=> %s %v", n.Op, n.Node) - if n.Out != nil { - // make sure (seemingly) empty values are readable - switch n.Out.(type) { - case string: - fmt.Fprintf(&buf, " %q", n.Out) - case []byte: - fmt.Fprintf(&buf, " [% x]", n.Out) - default: - fmt.Fprintf(&buf, " %s", n.Out) - } - } - if n.Err != "" { - fmt.Fprintf(&buf, " Err:%v", n.Err) - } - return buf.String() -} - -type logMissingNode struct { - MaxNode fuse.NodeID -} - -func opName(req fuse.Request) string { - t := reflect.Indirect(reflect.ValueOf(req)).Type() - s := t.Name() - s = strings.TrimSuffix(s, "Request") - return s -} - -type logLinkRequestOldNodeNotFound struct { - Request *fuse.Header - In *fuse.LinkRequest -} - -func (m *logLinkRequestOldNodeNotFound) String() string { - return fmt.Sprintf("In LinkRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.OldNode) -} - -type renameNewDirNodeNotFound struct { - Request *fuse.Header - In *fuse.RenameRequest -} - -func (m *renameNewDirNodeNotFound) String() string { - return fmt.Sprintf("In RenameRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.NewDir) -} - -type handlerPanickedError struct { - Request interface{} - Err interface{} 
-} - -var _ error = handlerPanickedError{} - -func (h handlerPanickedError) Error() string { - return fmt.Sprintf("handler panicked: %v", h.Err) -} - -var _ fuse.ErrorNumber = handlerPanickedError{} - -func (h handlerPanickedError) Errno() fuse.Errno { - if err, ok := h.Err.(fuse.ErrorNumber); ok { - return err.Errno() - } - return fuse.DefaultErrno -} - -// handlerTerminatedError happens when a handler terminates itself -// with runtime.Goexit. This is most commonly because of incorrect use -// of testing.TB.FailNow, typically via t.Fatal. -type handlerTerminatedError struct { - Request interface{} -} - -var _ error = handlerTerminatedError{} - -func (h handlerTerminatedError) Error() string { - return "handler terminated (called runtime.Goexit)" -} - -var _ fuse.ErrorNumber = handlerTerminatedError{} - -func (h handlerTerminatedError) Errno() fuse.Errno { - return fuse.DefaultErrno -} - -type handleNotReaderError struct { - handle Handle -} - -var _ error = handleNotReaderError{} - -func (e handleNotReaderError) Error() string { - return fmt.Sprintf("handle has no Read: %T", e.handle) -} - -var _ fuse.ErrorNumber = handleNotReaderError{} - -func (e handleNotReaderError) Errno() fuse.Errno { - return fuse.ENOTSUP -} - -func initLookupResponse(s *fuse.LookupResponse) { - s.EntryValid = entryValidTime -} - -func (c *Server) serve(r fuse.Request) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - parentCtx := ctx - if c.context != nil { - ctx = c.context(ctx, r) - } - - req := &serveRequest{Request: r, cancel: cancel} - - c.debug(request{ - Op: opName(r), - Request: r.Hdr(), - In: r, - }) - var node Node - var snode *serveNode - c.meta.Lock() - hdr := r.Hdr() - if id := hdr.Node; id != 0 { - if id < fuse.NodeID(len(c.node)) { - snode = c.node[uint(id)] - } - if snode == nil { - c.meta.Unlock() - c.debug(response{ - Op: opName(r), - Request: logResponseHeader{ID: hdr.ID}, - Error: fuse.ESTALE.ErrnoName(), - // this is the only place that sets both Error and - // Out; not sure if I want to do that; might get rid - // of len(c.node) things altogether - Out: logMissingNode{ - MaxNode: fuse.NodeID(len(c.node)), - }, - }) - r.RespondError(fuse.ESTALE) - return - } - node = snode.node - } - if c.req[hdr.ID] != nil { - // This happens with OSXFUSE. Assume it's okay and - // that we'll never see an interrupt for this one. - // Otherwise everything wedges. TODO: Report to OSXFUSE? - // - // TODO this might have been because of missing done() calls - } else { - c.req[hdr.ID] = req - } - c.meta.Unlock() - - // Call this before responding. - // After responding is too late: we might get another request - // with the same ID and be very confused.
- done := func(resp interface{}) { - msg := response{ - Op: opName(r), - Request: logResponseHeader{ID: hdr.ID}, - } - if err, ok := resp.(error); ok { - msg.Error = err.Error() - if ferr, ok := err.(fuse.ErrorNumber); ok { - errno := ferr.Errno() - msg.Errno = errno.ErrnoName() - if errno == err { - // it's just a fuse.Errno with no extra detail; - // skip the textual message for log readability - msg.Error = "" - } - } else { - msg.Errno = fuse.DefaultErrno.ErrnoName() - } - } else { - msg.Out = resp - } - c.debug(msg) - - c.meta.Lock() - delete(c.req, hdr.ID) - c.meta.Unlock() - } - - var responded bool - defer func() { - if rec := recover(); rec != nil { - const size = 1 << 16 - buf := make([]byte, size) - n := runtime.Stack(buf, false) - buf = buf[:n] - log.Printf("fuse: panic in handler for %v: %v\n%s", r, rec, buf) - err := handlerPanickedError{ - Request: r, - Err: rec, - } - done(err) - r.RespondError(err) - return - } - - if !responded { - err := handlerTerminatedError{ - Request: r, - } - done(err) - r.RespondError(err) - } - }() - - if err := c.handleRequest(ctx, node, snode, r, done); err != nil { - if err == context.Canceled { - select { - case <-parentCtx.Done(): - // We canceled the parent context because of an - // incoming interrupt request, so return EINTR - // to trigger the right behavior in the client app. - // - // Only do this when it's the parent context that was - // canceled, not a context controlled by the program - // using this library, so we don't return EINTR too - // eagerly -- it might cause busy loops. - // - // Decent write-up on role of EINTR: - // http://250bpm.com/blog:12 - err = fuse.EINTR - default: - // nothing - } - } - done(err) - r.RespondError(err) - } - - // disarm runtime.Goexit protection - responded = true -} - -// handleRequest will either a) call done(s) and r.Respond(s) OR b) return an error. -func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, r fuse.Request, done func(resp interface{})) error { - switch r := r.(type) { - default: - // Note: To FUSE, ENOSYS means "this server never implements this request." - // It would be inappropriate to return ENOSYS for other operations in this - // switch that might only be unavailable in some contexts, not all. - return fuse.ENOSYS - - case *fuse.StatfsRequest: - s := &fuse.StatfsResponse{} - if fs, ok := c.fs.(FSStatfser); ok { - if err := fs.Statfs(ctx, r, s); err != nil { - return err - } - } - done(s) - r.Respond(s) - return nil - - // Node operations. - case *fuse.GetattrRequest: - s := &fuse.GetattrResponse{} - if n, ok := node.(NodeGetattrer); ok { - if err := n.Getattr(ctx, r, s); err != nil { - return err - } - } else { - if err := snode.attr(ctx, &s.Attr); err != nil { - return err - } - } - done(s) - r.Respond(s) - return nil - - case *fuse.SetattrRequest: - s := &fuse.SetattrResponse{} - if n, ok := node.(NodeSetattrer); ok { - if err := n.Setattr(ctx, r, s); err != nil { - return err - } - } - - if err := snode.attr(ctx, &s.Attr); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.SymlinkRequest: - s := &fuse.SymlinkResponse{} - initLookupResponse(&s.LookupResponse) - n, ok := node.(NodeSymlinker) - if !ok { - return fuse.EIO // XXX or EPERM like Mkdir? 
- } - n2, err := n.Symlink(ctx, r) - if err != nil { - return err - } - if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.NewName, n2); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.ReadlinkRequest: - n, ok := node.(NodeReadlinker) - if !ok { - return fuse.EIO /// XXX or EPERM? - } - target, err := n.Readlink(ctx, r) - if err != nil { - return err - } - done(target) - r.Respond(target) - return nil - - case *fuse.LinkRequest: - n, ok := node.(NodeLinker) - if !ok { - return fuse.EIO /// XXX or EPERM? - } - c.meta.Lock() - var oldNode *serveNode - if int(r.OldNode) < len(c.node) { - oldNode = c.node[r.OldNode] - } - c.meta.Unlock() - if oldNode == nil { - c.debug(logLinkRequestOldNodeNotFound{ - Request: r.Hdr(), - In: r, - }) - return fuse.EIO - } - n2, err := n.Link(ctx, r, oldNode.node) - if err != nil { - return err - } - s := &fuse.LookupResponse{} - initLookupResponse(s) - if err := c.saveLookup(ctx, s, snode, r.NewName, n2); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.RemoveRequest: - n, ok := node.(NodeRemover) - if !ok { - return fuse.EIO /// XXX or EPERM? - } - err := n.Remove(ctx, r) - if err != nil { - return err - } - done(nil) - r.Respond() - return nil - - case *fuse.AccessRequest: - if n, ok := node.(NodeAccesser); ok { - if err := n.Access(ctx, r); err != nil { - return err - } - } - done(nil) - r.Respond() - return nil - - case *fuse.LookupRequest: - var n2 Node - var err error - s := &fuse.LookupResponse{} - initLookupResponse(s) - if n, ok := node.(NodeStringLookuper); ok { - n2, err = n.Lookup(ctx, r.Name) - } else if n, ok := node.(NodeRequestLookuper); ok { - n2, err = n.Lookup(ctx, r, s) - } else { - return fuse.ENOENT - } - if err != nil { - return err - } - if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.MkdirRequest: - s := &fuse.MkdirResponse{} - initLookupResponse(&s.LookupResponse) - n, ok := node.(NodeMkdirer) - if !ok { - return fuse.EPERM - } - n2, err := n.Mkdir(ctx, r) - if err != nil { - return err - } - if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.OpenRequest: - s := &fuse.OpenResponse{} - var h2 Handle - if n, ok := node.(NodeOpener); ok { - hh, err := n.Open(ctx, r, s) - if err != nil { - return err - } - h2 = hh - } else { - h2 = node - } - s.Handle = c.saveHandle(h2, r.Hdr().Node) - done(s) - r.Respond(s) - return nil - - case *fuse.CreateRequest: - n, ok := node.(NodeCreater) - if !ok { - // If we send back ENOSYS, FUSE will try mknod+open. 
- return fuse.EPERM - } - s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} - initLookupResponse(&s.LookupResponse) - n2, h2, err := n.Create(ctx, r, s) - if err != nil { - return err - } - if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { - return err - } - s.Handle = c.saveHandle(h2, r.Hdr().Node) - done(s) - r.Respond(s) - return nil - - case *fuse.GetxattrRequest: - n, ok := node.(NodeGetxattrer) - if !ok { - return fuse.ENOTSUP - } - s := &fuse.GetxattrResponse{} - err := n.Getxattr(ctx, r, s) - if err != nil { - return err - } - if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { - return fuse.ERANGE - } - done(s) - r.Respond(s) - return nil - - case *fuse.ListxattrRequest: - n, ok := node.(NodeListxattrer) - if !ok { - return fuse.ENOTSUP - } - s := &fuse.ListxattrResponse{} - err := n.Listxattr(ctx, r, s) - if err != nil { - return err - } - if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { - return fuse.ERANGE - } - done(s) - r.Respond(s) - return nil - - case *fuse.SetxattrRequest: - n, ok := node.(NodeSetxattrer) - if !ok { - return fuse.ENOTSUP - } - err := n.Setxattr(ctx, r) - if err != nil { - return err - } - done(nil) - r.Respond() - return nil - - case *fuse.RemovexattrRequest: - n, ok := node.(NodeRemovexattrer) - if !ok { - return fuse.ENOTSUP - } - err := n.Removexattr(ctx, r) - if err != nil { - return err - } - done(nil) - r.Respond() - return nil - - case *fuse.ForgetRequest: - forget := c.dropNode(r.Hdr().Node, r.N) - if forget { - n, ok := node.(NodeForgetter) - if ok { - n.Forget() - } - } - done(nil) - r.Respond() - return nil - - // Handle operations. - case *fuse.ReadRequest: - shandle := c.getHandle(r.Handle) - if shandle == nil { - return fuse.ESTALE - } - handle := shandle.handle - - s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} - if r.Dir { - if h, ok := handle.(HandleReadDirAller); ok { - // detect rewinddir(3) or similar seek and refresh - // contents - if r.Offset == 0 { - shandle.readData = nil - } - - if shandle.readData == nil { - dirs, err := h.ReadDirAll(ctx) - if err != nil { - return err - } - var data []byte - for _, dir := range dirs { - if dir.Inode == 0 { - dir.Inode = c.dynamicInode(snode.inode, dir.Name) - } - data = fuse.AppendDirent(data, dir) - } - shandle.readData = data - } - fuseutil.HandleRead(r, s, shandle.readData) - done(s) - r.Respond(s) - return nil - } - } else { - if h, ok := handle.(HandleReadAller); ok { - if shandle.readData == nil { - data, err := h.ReadAll(ctx) - if err != nil { - return err - } - if data == nil { - data = []byte{} - } - shandle.readData = data - } - fuseutil.HandleRead(r, s, shandle.readData) - done(s) - r.Respond(s) - return nil - } - h, ok := handle.(HandleReader) - if !ok { - err := handleNotReaderError{handle: handle} - return err - } - if err := h.Read(ctx, r, s); err != nil { - return err - } - } - done(s) - r.Respond(s) - return nil - - case *fuse.WriteRequest: - shandle := c.getHandle(r.Handle) - if shandle == nil { - return fuse.ESTALE - } - - s := &fuse.WriteResponse{} - if h, ok := shandle.handle.(HandleWriter); ok { - if err := h.Write(ctx, r, s); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - } - return fuse.EIO - - case *fuse.FlushRequest: - shandle := c.getHandle(r.Handle) - if shandle == nil { - return fuse.ESTALE - } - handle := shandle.handle - - if h, ok := handle.(HandleFlusher); ok { - if err := h.Flush(ctx, r); err != nil { - return err - } - } - done(nil) - r.Respond() - return nil - - case 
*fuse.ReleaseRequest: - shandle := c.getHandle(r.Handle) - if shandle == nil { - return fuse.ESTALE - } - handle := shandle.handle - - // No matter what, release the handle. - c.dropHandle(r.Handle) - - if h, ok := handle.(HandleReleaser); ok { - if err := h.Release(ctx, r); err != nil { - return err - } - } - done(nil) - r.Respond() - return nil - - case *fuse.DestroyRequest: - if fs, ok := c.fs.(FSDestroyer); ok { - fs.Destroy() - } - done(nil) - r.Respond() - return nil - - case *fuse.RenameRequest: - c.meta.Lock() - var newDirNode *serveNode - if int(r.NewDir) < len(c.node) { - newDirNode = c.node[r.NewDir] - } - c.meta.Unlock() - if newDirNode == nil { - c.debug(renameNewDirNodeNotFound{ - Request: r.Hdr(), - In: r, - }) - return fuse.EIO - } - n, ok := node.(NodeRenamer) - if !ok { - return fuse.EIO // XXX or EPERM like Mkdir? - } - err := n.Rename(ctx, r, newDirNode.node) - if err != nil { - return err - } - done(nil) - r.Respond() - return nil - - case *fuse.MknodRequest: - n, ok := node.(NodeMknoder) - if !ok { - return fuse.EIO - } - n2, err := n.Mknod(ctx, r) - if err != nil { - return err - } - s := &fuse.LookupResponse{} - initLookupResponse(s) - if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { - return err - } - done(s) - r.Respond(s) - return nil - - case *fuse.FsyncRequest: - n, ok := node.(NodeFsyncer) - if !ok { - return fuse.EIO - } - err := n.Fsync(ctx, r) - if err != nil { - return err - } - done(nil) - r.Respond() - return nil - - case *fuse.InterruptRequest: - c.meta.Lock() - ireq := c.req[r.IntrID] - if ireq != nil && ireq.cancel != nil { - ireq.cancel() - ireq.cancel = nil - } - c.meta.Unlock() - done(nil) - r.Respond() - return nil - - /* case *FsyncdirRequest: - return ENOSYS - - case *GetlkRequest, *SetlkRequest, *SetlkwRequest: - return ENOSYS - - case *BmapRequest: - return ENOSYS - - case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: - return ENOSYS - */ - } - - panic("not reached") -} - -func (c *Server) saveLookup(ctx context.Context, s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) error { - if err := nodeAttr(ctx, n2, &s.Attr); err != nil { - return err - } - if s.Attr.Inode == 0 { - s.Attr.Inode = c.dynamicInode(snode.inode, elem) - } - - s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2) - return nil -} - -type invalidateNodeDetail struct { - Off int64 - Size int64 -} - -func (i invalidateNodeDetail) String() string { - return fmt.Sprintf("Off:%d Size:%d", i.Off, i.Size) -} - -func errstr(err error) string { - if err == nil { - return "" - } - return err.Error() -} - -func (s *Server) invalidateNode(node Node, off int64, size int64) error { - s.meta.Lock() - id, ok := s.nodeRef[node] - if ok { - snode := s.node[id] - snode.wg.Add(1) - defer snode.wg.Done() - } - s.meta.Unlock() - if !ok { - // This is what the kernel would have said, if we had been - // able to send this message; it's not cached. - return fuse.ErrNotCached - } - // Delay logging until after we can record the error too. We - // consider a /dev/fuse write to be instantaneous enough to not - // need separate before and after messages. - err := s.conn.InvalidateNode(id, off, size) - s.debug(notification{ - Op: "InvalidateNode", - Node: id, - Out: invalidateNodeDetail{ - Off: off, - Size: size, - }, - Err: errstr(err), - }) - return err -} - -// InvalidateNodeAttr invalidates the kernel cache of the attributes -// of node. -// -// Returns fuse.ErrNotCached if the kernel is not currently caching -// the node. 
-func (s *Server) InvalidateNodeAttr(node Node) error { - return s.invalidateNode(node, 0, 0) -} - -// InvalidateNodeData invalidates the kernel cache of the attributes -// and data of node. -// -// Returns fuse.ErrNotCached if the kernel is not currently caching -// the node. -func (s *Server) InvalidateNodeData(node Node) error { - return s.invalidateNode(node, 0, -1) -} - -// InvalidateNodeDataRange invalidates the kernel cache of the -// attributes and a range of the data of node. -// -// Returns fuse.ErrNotCached if the kernel is not currently caching -// the node. -func (s *Server) InvalidateNodeDataRange(node Node, off int64, size int64) error { - return s.invalidateNode(node, off, size) -} - -type invalidateEntryDetail struct { - Name string -} - -func (i invalidateEntryDetail) String() string { - return fmt.Sprintf("%q", i.Name) -} - -// InvalidateEntry invalidates the kernel cache of the directory entry -// identified by parent node and entry basename. -// -// Kernel may or may not cache directory listings. To invalidate -// those, use InvalidateNode to invalidate all of the data for a -// directory. (As of 2015-06, Linux FUSE does not cache directory -// listings.) -// -// Returns ErrNotCached if the kernel is not currently caching the -// node. -func (s *Server) InvalidateEntry(parent Node, name string) error { - s.meta.Lock() - id, ok := s.nodeRef[parent] - if ok { - snode := s.node[id] - snode.wg.Add(1) - defer snode.wg.Done() - } - s.meta.Unlock() - if !ok { - // This is what the kernel would have said, if we had been - // able to send this message; it's not cached. - return fuse.ErrNotCached - } - err := s.conn.InvalidateEntry(id, name) - s.debug(notification{ - Op: "InvalidateEntry", - Node: id, - Out: invalidateEntryDetail{ - Name: name, - }, - Err: errstr(err), - }) - return err -} - -// DataHandle returns a read-only Handle that satisfies reads -// using the given data. -func DataHandle(data []byte) Handle { - return &dataHandle{data} -} - -type dataHandle struct { - data []byte -} - -func (d *dataHandle) ReadAll(ctx context.Context) ([]byte, error) { - return d.data, nil -} - -// GenerateDynamicInode returns a dynamic inode. -// -// The parent inode and current entry name are used as the criteria -// for choosing a pseudorandom inode. This makes it likely the same -// entry will get the same inode on multiple runs. -func GenerateDynamicInode(parent uint64, name string) uint64 { - h := fnv.New64a() - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], parent) - _, _ = h.Write(buf[:]) - _, _ = h.Write([]byte(name)) - var inode uint64 - for { - inode = h.Sum64() - if inode != 0 { - break - } - // there's a tiny probability that result is zero; change the - // input a little and try again - _, _ = h.Write([]byte{'x'}) - } - return inode -} diff --git a/vendor/bazil.org/fuse/fs/tree.go b/vendor/bazil.org/fuse/fs/tree.go deleted file mode 100644 index 7e078045a..000000000 --- a/vendor/bazil.org/fuse/fs/tree.go +++ /dev/null @@ -1,99 +0,0 @@ -// FUSE directory tree, for servers that wish to use it with the service loop. - -package fs - -import ( - "os" - pathpkg "path" - "strings" - - "golang.org/x/net/context" -) - -import ( - "bazil.org/fuse" -) - -// A Tree implements a basic read-only directory tree for FUSE. -// The Nodes contained in it may still be writable. -type Tree struct { - tree -} - -func (t *Tree) Root() (Node, error) { - return &t.tree, nil -} - -// Add adds the path to the tree, resolving to the given node. 
-// If path or a prefix of path has already been added to the tree, -// Add panics. -// -// Add is only safe to call before starting to serve requests. -func (t *Tree) Add(path string, node Node) { - path = pathpkg.Clean("/" + path)[1:] - elems := strings.Split(path, "/") - dir := Node(&t.tree) - for i, elem := range elems { - dt, ok := dir.(*tree) - if !ok { - panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path) - } - n := dt.lookup(elem) - if n != nil { - if i+1 == len(elems) { - panic("fuse: Tree.Add for " + path + " conflicts with " + elem) - } - dir = n - } else { - if i+1 == len(elems) { - dt.add(elem, node) - } else { - dir = &tree{} - dt.add(elem, dir) - } - } - } -} - -type treeDir struct { - name string - node Node -} - -type tree struct { - dir []treeDir -} - -func (t *tree) lookup(name string) Node { - for _, d := range t.dir { - if d.name == name { - return d.node - } - } - return nil -} - -func (t *tree) add(name string, n Node) { - t.dir = append(t.dir, treeDir{name, n}) -} - -func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error { - a.Mode = os.ModeDir | 0555 - return nil -} - -func (t *tree) Lookup(ctx context.Context, name string) (Node, error) { - n := t.lookup(name) - if n != nil { - return n, nil - } - return nil, fuse.ENOENT -} - -func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - var out []fuse.Dirent - for _, d := range t.dir { - out = append(out, fuse.Dirent{Name: d.name}) - } - return out, nil -} diff --git a/vendor/bazil.org/fuse/fuse.go b/vendor/bazil.org/fuse/fuse.go deleted file mode 100644 index 7dc70f9e1..000000000 --- a/vendor/bazil.org/fuse/fuse.go +++ /dev/null @@ -1,2304 +0,0 @@ -// See the file LICENSE for copyright and licensing information. - -// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, -// which carries this notice: -// -// The files in this directory are subject to the following license. -// -// The author of this software is Russ Cox. -// -// Copyright (c) 2006 Russ Cox -// -// Permission to use, copy, modify, and distribute this software for any -// purpose without fee is hereby granted, provided that this entire notice -// is included in all copies of any software which is or includes a copy -// or modification of this software and in all copies of the supporting -// documentation for such software. -// -// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED -// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY -// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS -// FITNESS FOR ANY PARTICULAR PURPOSE. - -// Package fuse enables writing FUSE file systems on Linux, OS X, and FreeBSD. -// -// On OS X, it requires OSXFUSE (http://osxfuse.github.com/). -// -// There are two approaches to writing a FUSE file system. The first is to speak -// the low-level message protocol, reading from a Conn using ReadRequest and -// writing using the various Respond methods. This approach is closest to -// the actual interaction with the kernel and can be the simplest one in contexts -// such as protocol translators. -// -// Servers of synthesized file systems tend to share common -// bookkeeping abstracted away by the second approach, which is to -// call fs.Serve to serve the FUSE protocol using an implementation of -// the service methods in the interfaces FS* (file system), Node* (file -// or directory), and Handle* (opened file or directory). 
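For orientation, here is a minimal sketch of the fs.Serve approach described above, assuming the bazil.org/fuse API exactly as vendored here (including the golang.org/x/net/context import this tree uses); the mountpoint path, the entry name, and the file contents are illustrative only:

package main

import (
	"log"
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

type helloFS struct{}

func (helloFS) Root() (fs.Node, error) { return dir{}, nil }

// dir is the root directory, holding a single "hello" entry.
type dir struct{}

func (dir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 1
	a.Mode = os.ModeDir | 0555
	return nil
}

func (dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "hello" {
		return file{}, nil
	}
	return nil, fuse.ENOENT
}

func (dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	return []fuse.Dirent{{Inode: 2, Name: "hello", Type: fuse.DT_File}}, nil
}

// file is read-only; with no Open method, the Node itself is used as
// the Handle, and ReadAll satisfies reads.
type file struct{}

func (file) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0444
	a.Size = uint64(len("hello, world\n"))
	return nil
}

func (file) ReadAll(ctx context.Context) ([]byte, error) {
	return []byte("hello, world\n"), nil
}

func main() {
	c, err := fuse.Mount("/mnt/hello") // mountpoint is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	if err := fs.Serve(c, helloFS{}); err != nil {
		log.Fatal(err)
	}
	// Check if the mount process itself failed.
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}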
-// There are a daunting number of such methods that can be written, -// but few are required. -// The specific methods are described in the documentation for those interfaces. -// -// The hellofs subdirectory contains a simple illustration of the fs.Serve approach. -// -// Service Methods -// -// The required and optional methods for the FS, Node, and Handle interfaces -// have the general form -// -// Op(ctx context.Context, req *OpRequest, resp *OpResponse) error -// -// where Op is the name of a FUSE operation. Op reads request -// parameters from req and writes results to resp. An operation whose -// only result is the error result omits the resp parameter. -// -// Multiple goroutines may call service methods simultaneously; the -// methods being called are responsible for appropriate -// synchronization. -// -// The operation must not hold on to the request or response, -// including any []byte fields such as WriteRequest.Data or -// SetxattrRequest.Xattr. -// -// Errors -// -// Operations can return errors. The FUSE interface can only -// communicate POSIX errno error numbers to file system clients; the -// message is not visible to file system clients. The returned error -// can implement ErrorNumber to control the errno returned. Without -// ErrorNumber, a generic errno (EIO) is returned. -// -// Error messages will be visible in the debug log as part of the -// response. -// -// Interrupted Operations -// -// In some file systems, some operations -// may take an undetermined amount of time. For example, a Read waiting for -// a network message or a matching Write might wait indefinitely. If the request -// is cancelled and no longer needed, the context will be cancelled. -// Blocking operations should select on a receive from ctx.Done() and attempt to -// abort the operation early if the receive succeeds (meaning the channel is closed). -// To indicate that the operation failed because it was aborted, return fuse.EINTR. -// -// If an operation does not block for an indefinite amount of time, supporting -// cancellation is not necessary. -// -// Authentication -// -// All request types embed a Header, meaning that the method can -// inspect req.Pid, req.Uid, and req.Gid as necessary to implement -// permission checking. The kernel FUSE layer normally prevents other -// users from accessing the FUSE file system (to change this, see -// AllowOther, AllowRoot), but does not enforce access modes (to -// change this, see DefaultPermissions). -// -// Mount Options -// -// Behavior and metadata of the mounted file system can be changed by -// passing MountOption values to Mount. -// -package fuse // import "bazil.org/fuse" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "sync" - "syscall" - "time" - "unsafe" -) - -// A Conn represents a connection to a mounted FUSE file system. -type Conn struct { - // Ready is closed when the mount is complete or has failed. - Ready <-chan struct{} - - // MountError stores any error from the mount process. Only valid - // after Ready is closed. - MountError error - - // File handle for kernel communication. Only safe to access if - // rio or wio is held. - dev *os.File - wio sync.RWMutex - rio sync.RWMutex - - // Protocol version negotiated with InitRequest/InitResponse. - proto Protocol -} - -// MountpointDoesNotExistError is an error returned when the -// mountpoint does not exist.
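A sketch of the cancellation pattern described under Interrupted Operations above; the mynetfs package, the netHandle type, and its results channel are hypothetical stand-ins for a handle whose reads block on external events:

package mynetfs

import (
	"bazil.org/fuse"
	"golang.org/x/net/context"
)

// netHandle is a hypothetical handle whose reads block until an
// external reply arrives on results.
type netHandle struct {
	results chan []byte
}

func (h *netHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	select {
	case data := <-h.results:
		// Trim to the requested size before storing the result.
		if len(data) > req.Size {
			data = data[:req.Size]
		}
		resp.Data = data
		return nil
	case <-ctx.Done():
		// The request was cancelled (for example by an interrupt);
		// EINTR reports the abort to the file system client.
		return fuse.EINTR
	}
}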
-type MountpointDoesNotExistError struct { - Path string -} - -var _ error = (*MountpointDoesNotExistError)(nil) - -func (e *MountpointDoesNotExistError) Error() string { - return fmt.Sprintf("mountpoint does not exist: %v", e.Path) -} - -// Mount mounts a new FUSE connection on the named directory -// and returns a connection for reading and writing FUSE messages. -// -// After a successful return, caller must call Close to free -// resources. -// -// Even on successful return, the new mount is not guaranteed to be -// visible until after Conn.Ready is closed. See Conn.MountError for -// possible errors. Incoming requests on Conn must be served to make -// progress. -func Mount(dir string, options ...MountOption) (*Conn, error) { - conf := mountConfig{ - options: make(map[string]string), - } - for _, option := range options { - if err := option(&conf); err != nil { - return nil, err - } - } - - ready := make(chan struct{}, 1) - c := &Conn{ - Ready: ready, - } - f, err := mount(dir, &conf, ready, &c.MountError) - if err != nil { - return nil, err - } - c.dev = f - - if err := initMount(c, &conf); err != nil { - c.Close() - if err == ErrClosedWithoutInit { - // see if we can provide a better error - <-c.Ready - if err := c.MountError; err != nil { - return nil, err - } - } - return nil, err - } - - return c, nil -} - -type OldVersionError struct { - Kernel Protocol - LibraryMin Protocol -} - -func (e *OldVersionError) Error() string { - return fmt.Sprintf("kernel FUSE version is too old: %v < %v", e.Kernel, e.LibraryMin) -} - -var ( - ErrClosedWithoutInit = errors.New("fuse connection closed without init") -) - -func initMount(c *Conn, conf *mountConfig) error { - req, err := c.ReadRequest() - if err != nil { - if err == io.EOF { - return ErrClosedWithoutInit - } - return err - } - r, ok := req.(*InitRequest) - if !ok { - return fmt.Errorf("missing init, got: %T", req) - } - - min := Protocol{protoVersionMinMajor, protoVersionMinMinor} - if r.Kernel.LT(min) { - req.RespondError(Errno(syscall.EPROTO)) - c.Close() - return &OldVersionError{ - Kernel: r.Kernel, - LibraryMin: min, - } - } - - proto := Protocol{protoVersionMaxMajor, protoVersionMaxMinor} - if r.Kernel.LT(proto) { - // Kernel doesn't support the latest version we have. - proto = r.Kernel - } - c.proto = proto - - s := &InitResponse{ - Library: proto, - MaxReadahead: conf.maxReadahead, - MaxWrite: maxWrite, - Flags: InitBigWrites | conf.initFlags, - } - r.Respond(s) - return nil -} - -// A Request represents a single FUSE request received from the kernel. -// Use a type switch to determine the specific kind. -// A request of unrecognized type will have concrete type *Header. -type Request interface { - // Hdr returns the Header associated with this request. - Hdr() *Header - - // RespondError responds to the request with the given error. - RespondError(error) - - String() string -} - -// A RequestID identifies an active FUSE request. -type RequestID uint64 - -func (r RequestID) String() string { - return fmt.Sprintf("%#x", uint64(r)) -} - -// A NodeID is a number identifying a directory or file. -// It must be unique among IDs returned in LookupResponses -// that have not yet been forgotten by ForgetRequests. -type NodeID uint64 - -func (n NodeID) String() string { - return fmt.Sprintf("%#x", uint64(n)) -} - -// A HandleID is a number identifying an open directory or file. -// It only needs to be unique while the directory or file is open. 
-type HandleID uint64 - -func (h HandleID) String() string { - return fmt.Sprintf("%#x", uint64(h)) -} - -// The RootID identifies the root directory of a FUSE file system. -const RootID NodeID = rootID - -// A Header describes the basic information sent in every request. -type Header struct { - Conn *Conn `json:"-"` // connection this request was received on - ID RequestID // unique ID for request - Node NodeID // file or directory the request is about - Uid uint32 // user ID of process making request - Gid uint32 // group ID of process making request - Pid uint32 // process ID of process making request - - // for returning to reqPool - msg *message -} - -func (h *Header) String() string { - return fmt.Sprintf("ID=%v Node=%v Uid=%d Gid=%d Pid=%d", h.ID, h.Node, h.Uid, h.Gid, h.Pid) -} - -func (h *Header) Hdr() *Header { - return h -} - -func (h *Header) noResponse() { - putMessage(h.msg) -} - -func (h *Header) respond(msg []byte) { - out := (*outHeader)(unsafe.Pointer(&msg[0])) - out.Unique = uint64(h.ID) - h.Conn.respond(msg) - putMessage(h.msg) -} - -// An ErrorNumber is an error with a specific error number. -// -// Operations may return an error value that implements ErrorNumber to -// control what specific error number (errno) to return. -type ErrorNumber interface { - // Errno returns the error number (errno) for this error. - Errno() Errno -} - -const ( - // ENOSYS indicates that the call is not supported. - ENOSYS = Errno(syscall.ENOSYS) - - // ESTALE is used by Serve to respond to violations of the FUSE protocol. - ESTALE = Errno(syscall.ESTALE) - - ENOENT = Errno(syscall.ENOENT) - EIO = Errno(syscall.EIO) - EPERM = Errno(syscall.EPERM) - - // EINTR indicates the request was interrupted by an InterruptRequest. - // See also fs.Intr. - EINTR = Errno(syscall.EINTR) - - ERANGE = Errno(syscall.ERANGE) - ENOTSUP = Errno(syscall.ENOTSUP) - EEXIST = Errno(syscall.EEXIST) -) - -// DefaultErrno is the errno used when the error returned does not -// implement ErrorNumber. -const DefaultErrno = EIO - -var errnoNames = map[Errno]string{ - ENOSYS: "ENOSYS", - ESTALE: "ESTALE", - ENOENT: "ENOENT", - EIO: "EIO", - EPERM: "EPERM", - EINTR: "EINTR", - EEXIST: "EEXIST", -} - -// Errno implements Error and ErrorNumber using a syscall.Errno. -type Errno syscall.Errno - -var _ = ErrorNumber(Errno(0)) -var _ = error(Errno(0)) - -func (e Errno) Errno() Errno { - return e -} - -func (e Errno) String() string { - return syscall.Errno(e).Error() -} - -func (e Errno) Error() string { - return syscall.Errno(e).Error() -} - -// ErrnoName returns the short non-numeric identifier for this errno. -// For example, "EIO". -func (e Errno) ErrnoName() string { - s := errnoNames[e] - if s == "" { - s = fmt.Sprint(e.Errno()) - } - return s -} - -func (e Errno) MarshalText() ([]byte, error) { - s := e.ErrnoName() - return []byte(s), nil -} - -func (h *Header) RespondError(err error) { - errno := DefaultErrno - if ferr, ok := err.(ErrorNumber); ok { - errno = ferr.Errno() - } - // FUSE uses negative errors! - // TODO: File bug report against OSXFUSE: positive error causes kernel panic. - buf := newBuffer(0) - hOut := (*outHeader)(unsafe.Pointer(&buf[0])) - hOut.Error = -int32(errno) - h.respond(buf) -} - -// All requests read from the kernel, without data, are shorter than -// this. -var maxRequestSize = syscall.Getpagesize() -var bufSize = maxRequestSize + maxWrite - -// reqPool is a pool of messages. -// -// Lifetime of a logical message is from getMessage to putMessage. -// getMessage is called by ReadRequest.
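A sketch of a custom error that carries a specific errno via the ErrorNumber interface defined above; the myfs package and the quotaError type are hypothetical, and without the Errno method the client would see DefaultErrno (EIO) while the descriptive message would still appear in the debug log:

package myfs

import (
	"fmt"
	"syscall"

	"bazil.org/fuse"
)

// quotaError keeps a human-readable message for the debug log while
// mapping to a precise errno on the wire.
type quotaError struct {
	used, limit uint64
}

var _ fuse.ErrorNumber = quotaError{}

func (e quotaError) Error() string {
	return fmt.Sprintf("quota exceeded: %d of %d bytes used", e.used, e.limit)
}

func (e quotaError) Errno() fuse.Errno {
	// EDQUOT is what a kernel file system would report here.
	return fuse.Errno(syscall.EDQUOT)
}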
putMessage is called by -// Conn.ReadRequest, Request.Respond, or Request.RespondError. -// -// Messages in the pool are guaranteed to have conn and off zeroed, -// buf allocated and len==bufSize, and hdr set. -var reqPool = sync.Pool{ - New: allocMessage, -} - -func allocMessage() interface{} { - m := &message{buf: make([]byte, bufSize)} - m.hdr = (*inHeader)(unsafe.Pointer(&m.buf[0])) - return m -} - -func getMessage(c *Conn) *message { - m := reqPool.Get().(*message) - m.conn = c - return m -} - -func putMessage(m *message) { - m.buf = m.buf[:bufSize] - m.conn = nil - m.off = 0 - reqPool.Put(m) -} - -// a message represents the bytes of a single FUSE message -type message struct { - conn *Conn - buf []byte // all bytes - hdr *inHeader // header - off int // offset for reading additional fields -} - -func (m *message) len() uintptr { - return uintptr(len(m.buf) - m.off) -} - -func (m *message) data() unsafe.Pointer { - var p unsafe.Pointer - if m.off < len(m.buf) { - p = unsafe.Pointer(&m.buf[m.off]) - } - return p -} - -func (m *message) bytes() []byte { - return m.buf[m.off:] -} - -func (m *message) Header() Header { - h := m.hdr - return Header{ - Conn: m.conn, - ID: RequestID(h.Unique), - Node: NodeID(h.Nodeid), - Uid: h.Uid, - Gid: h.Gid, - Pid: h.Pid, - - msg: m, - } -} - -// fileMode returns a Go os.FileMode from a Unix mode. -func fileMode(unixMode uint32) os.FileMode { - mode := os.FileMode(unixMode & 0777) - switch unixMode & syscall.S_IFMT { - case syscall.S_IFREG: - // nothing - case syscall.S_IFDIR: - mode |= os.ModeDir - case syscall.S_IFCHR: - mode |= os.ModeCharDevice | os.ModeDevice - case syscall.S_IFBLK: - mode |= os.ModeDevice - case syscall.S_IFIFO: - mode |= os.ModeNamedPipe - case syscall.S_IFLNK: - mode |= os.ModeSymlink - case syscall.S_IFSOCK: - mode |= os.ModeSocket - default: - // no idea - mode |= os.ModeDevice - } - if unixMode&syscall.S_ISUID != 0 { - mode |= os.ModeSetuid - } - if unixMode&syscall.S_ISGID != 0 { - mode |= os.ModeSetgid - } - return mode -} - -type noOpcode struct { - Opcode uint32 -} - -func (m noOpcode) String() string { - return fmt.Sprintf("No opcode %v", m.Opcode) -} - -type malformedMessage struct { -} - -func (malformedMessage) String() string { - return "malformed message" -} - -// Close closes the FUSE connection. -func (c *Conn) Close() error { - c.wio.Lock() - defer c.wio.Unlock() - c.rio.Lock() - defer c.rio.Unlock() - return c.dev.Close() -} - -// caller must hold wio or rio -func (c *Conn) fd() int { - return int(c.dev.Fd()) -} - -func (c *Conn) Protocol() Protocol { - return c.proto -} - -// ReadRequest returns the next FUSE request from the kernel. -// -// Caller must call either Request.Respond or Request.RespondError in -// a reasonable time. Caller must not retain Request after that call. -func (c *Conn) ReadRequest() (Request, error) { - m := getMessage(c) -loop: - c.rio.RLock() - n, err := syscall.Read(c.fd(), m.buf) - c.rio.RUnlock() - if err == syscall.EINTR { - // OSXFUSE sends EINTR to userspace when a request interrupt - // completed before it got sent to userspace? - goto loop - } - if err != nil && err != syscall.ENODEV { - putMessage(m) - return nil, err - } - if n <= 0 { - putMessage(m) - return nil, io.EOF - } - m.buf = m.buf[:n] - - if n < inHeaderSize { - putMessage(m) - return nil, errors.New("fuse: message too short") - } - - // FreeBSD FUSE sends a short length in the header - // for FUSE_INIT even though the actual read length is correct. 
- if n == inHeaderSize+initInSize && m.hdr.Opcode == opInit && m.hdr.Len < uint32(n) { - m.hdr.Len = uint32(n) - } - - // OSXFUSE sometimes sends the wrong m.hdr.Len in a FUSE_WRITE message. - if m.hdr.Len < uint32(n) && m.hdr.Len >= uint32(unsafe.Sizeof(writeIn{})) && m.hdr.Opcode == opWrite { - m.hdr.Len = uint32(n) - } - - if m.hdr.Len != uint32(n) { - // prepare error message before returning m to pool - err := fmt.Errorf("fuse: read %d opcode %d but expected %d", n, m.hdr.Opcode, m.hdr.Len) - putMessage(m) - return nil, err - } - - m.off = inHeaderSize - - // Convert to data structures. - // Do not trust kernel to hand us well-formed data. - var req Request - switch m.hdr.Opcode { - default: - Debug(noOpcode{Opcode: m.hdr.Opcode}) - goto unrecognized - - case opLookup: - buf := m.bytes() - n := len(buf) - if n == 0 || buf[n-1] != '\x00' { - goto corrupt - } - req = &LookupRequest{ - Header: m.Header(), - Name: string(buf[:n-1]), - } - - case opForget: - in := (*forgetIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &ForgetRequest{ - Header: m.Header(), - N: in.Nlookup, - } - - case opGetattr: - switch { - case c.proto.LT(Protocol{7, 9}): - req = &GetattrRequest{ - Header: m.Header(), - } - - default: - in := (*getattrIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &GetattrRequest{ - Header: m.Header(), - Flags: GetattrFlags(in.GetattrFlags), - Handle: HandleID(in.Fh), - } - } - - case opSetattr: - in := (*setattrIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &SetattrRequest{ - Header: m.Header(), - Valid: SetattrValid(in.Valid), - Handle: HandleID(in.Fh), - Size: in.Size, - Atime: time.Unix(int64(in.Atime), int64(in.AtimeNsec)), - Mtime: time.Unix(int64(in.Mtime), int64(in.MtimeNsec)), - Mode: fileMode(in.Mode), - Uid: in.Uid, - Gid: in.Gid, - Bkuptime: in.BkupTime(), - Chgtime: in.Chgtime(), - Flags: in.Flags(), - } - - case opReadlink: - if len(m.bytes()) > 0 { - goto corrupt - } - req = &ReadlinkRequest{ - Header: m.Header(), - } - - case opSymlink: - // m.bytes() is "newName\0target\0" - names := m.bytes() - if len(names) == 0 || names[len(names)-1] != 0 { - goto corrupt - } - i := bytes.IndexByte(names, '\x00') - if i < 0 { - goto corrupt - } - newName, target := names[0:i], names[i+1:len(names)-1] - req = &SymlinkRequest{ - Header: m.Header(), - NewName: string(newName), - Target: string(target), - } - - case opLink: - in := (*linkIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - newName := m.bytes()[unsafe.Sizeof(*in):] - if len(newName) < 2 || newName[len(newName)-1] != 0 { - goto corrupt - } - newName = newName[:len(newName)-1] - req = &LinkRequest{ - Header: m.Header(), - OldNode: NodeID(in.Oldnodeid), - NewName: string(newName), - } - - case opMknod: - size := mknodInSize(c.proto) - if m.len() < size { - goto corrupt - } - in := (*mknodIn)(m.data()) - name := m.bytes()[size:] - if len(name) < 2 || name[len(name)-1] != '\x00' { - goto corrupt - } - name = name[:len(name)-1] - r := &MknodRequest{ - Header: m.Header(), - Mode: fileMode(in.Mode), - Rdev: in.Rdev, - Name: string(name), - } - if c.proto.GE(Protocol{7, 12}) { - r.Umask = fileMode(in.Umask) & os.ModePerm - } - req = r - - case opMkdir: - size := mkdirInSize(c.proto) - if m.len() < size { - goto corrupt - } - in := (*mkdirIn)(m.data()) - name := m.bytes()[size:] - i := bytes.IndexByte(name, '\x00') - if i < 0 { - goto corrupt - } - r := &MkdirRequest{ - Header: m.Header(), - Name: string(name[:i]), - // 
observed on Linux: mkdirIn.Mode & syscall.S_IFMT == 0, - // and this causes fileMode to go into its "no idea" - // code branch; enforce type to directory - Mode: fileMode((in.Mode &^ syscall.S_IFMT) | syscall.S_IFDIR), - } - if c.proto.GE(Protocol{7, 12}) { - r.Umask = fileMode(in.Umask) & os.ModePerm - } - req = r - - case opUnlink, opRmdir: - buf := m.bytes() - n := len(buf) - if n == 0 || buf[n-1] != '\x00' { - goto corrupt - } - req = &RemoveRequest{ - Header: m.Header(), - Name: string(buf[:n-1]), - Dir: m.hdr.Opcode == opRmdir, - } - - case opRename: - in := (*renameIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - newDirNodeID := NodeID(in.Newdir) - oldNew := m.bytes()[unsafe.Sizeof(*in):] - // oldNew should be "old\x00new\x00" - if len(oldNew) < 4 { - goto corrupt - } - if oldNew[len(oldNew)-1] != '\x00' { - goto corrupt - } - i := bytes.IndexByte(oldNew, '\x00') - if i < 0 { - goto corrupt - } - oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) - req = &RenameRequest{ - Header: m.Header(), - NewDir: newDirNodeID, - OldName: oldName, - NewName: newName, - } - - case opOpendir, opOpen: - in := (*openIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &OpenRequest{ - Header: m.Header(), - Dir: m.hdr.Opcode == opOpendir, - Flags: openFlags(in.Flags), - } - - case opRead, opReaddir: - in := (*readIn)(m.data()) - if m.len() < readInSize(c.proto) { - goto corrupt - } - r := &ReadRequest{ - Header: m.Header(), - Dir: m.hdr.Opcode == opReaddir, - Handle: HandleID(in.Fh), - Offset: int64(in.Offset), - Size: int(in.Size), - } - if c.proto.GE(Protocol{7, 9}) { - r.Flags = ReadFlags(in.ReadFlags) - r.LockOwner = in.LockOwner - r.FileFlags = openFlags(in.Flags) - } - req = r - - case opWrite: - in := (*writeIn)(m.data()) - if m.len() < writeInSize(c.proto) { - goto corrupt - } - r := &WriteRequest{ - Header: m.Header(), - Handle: HandleID(in.Fh), - Offset: int64(in.Offset), - Flags: WriteFlags(in.WriteFlags), - } - if c.proto.GE(Protocol{7, 9}) { - r.LockOwner = in.LockOwner - r.FileFlags = openFlags(in.Flags) - } - buf := m.bytes()[writeInSize(c.proto):] - if uint32(len(buf)) < in.Size { - goto corrupt - } - r.Data = buf - req = r - - case opStatfs: - req = &StatfsRequest{ - Header: m.Header(), - } - - case opRelease, opReleasedir: - in := (*releaseIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &ReleaseRequest{ - Header: m.Header(), - Dir: m.hdr.Opcode == opReleasedir, - Handle: HandleID(in.Fh), - Flags: openFlags(in.Flags), - ReleaseFlags: ReleaseFlags(in.ReleaseFlags), - LockOwner: in.LockOwner, - } - - case opFsync, opFsyncdir: - in := (*fsyncIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &FsyncRequest{ - Dir: m.hdr.Opcode == opFsyncdir, - Header: m.Header(), - Handle: HandleID(in.Fh), - Flags: in.FsyncFlags, - } - - case opSetxattr: - in := (*setxattrIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - m.off += int(unsafe.Sizeof(*in)) - name := m.bytes() - i := bytes.IndexByte(name, '\x00') - if i < 0 { - goto corrupt - } - xattr := name[i+1:] - if uint32(len(xattr)) < in.Size { - goto corrupt - } - xattr = xattr[:in.Size] - req = &SetxattrRequest{ - Header: m.Header(), - Flags: in.Flags, - Position: in.position(), - Name: string(name[:i]), - Xattr: xattr, - } - - case opGetxattr: - in := (*getxattrIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - name := m.bytes()[unsafe.Sizeof(*in):] - i := bytes.IndexByte(name, '\x00')
- if i < 0 { - goto corrupt - } - req = &GetxattrRequest{ - Header: m.Header(), - Name: string(name[:i]), - Size: in.Size, - Position: in.position(), - } - - case opListxattr: - in := (*getxattrIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &ListxattrRequest{ - Header: m.Header(), - Size: in.Size, - Position: in.position(), - } - - case opRemovexattr: - buf := m.bytes() - n := len(buf) - if n == 0 || buf[n-1] != '\x00' { - goto corrupt - } - req = &RemovexattrRequest{ - Header: m.Header(), - Name: string(buf[:n-1]), - } - - case opFlush: - in := (*flushIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &FlushRequest{ - Header: m.Header(), - Handle: HandleID(in.Fh), - Flags: in.FlushFlags, - LockOwner: in.LockOwner, - } - - case opInit: - in := (*initIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &InitRequest{ - Header: m.Header(), - Kernel: Protocol{in.Major, in.Minor}, - MaxReadahead: in.MaxReadahead, - Flags: InitFlags(in.Flags), - } - - case opGetlk: - panic("opGetlk") - case opSetlk: - panic("opSetlk") - case opSetlkw: - panic("opSetlkw") - - case opAccess: - in := (*accessIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &AccessRequest{ - Header: m.Header(), - Mask: in.Mask, - } - - case opCreate: - size := createInSize(c.proto) - if m.len() < size { - goto corrupt - } - in := (*createIn)(m.data()) - name := m.bytes()[size:] - i := bytes.IndexByte(name, '\x00') - if i < 0 { - goto corrupt - } - r := &CreateRequest{ - Header: m.Header(), - Flags: openFlags(in.Flags), - Mode: fileMode(in.Mode), - Name: string(name[:i]), - } - if c.proto.GE(Protocol{7, 12}) { - r.Umask = fileMode(in.Umask) & os.ModePerm - } - req = r - - case opInterrupt: - in := (*interruptIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - req = &InterruptRequest{ - Header: m.Header(), - IntrID: RequestID(in.Unique), - } - - case opBmap: - panic("opBmap") - - case opDestroy: - req = &DestroyRequest{ - Header: m.Header(), - } - - // OS X - case opSetvolname: - panic("opSetvolname") - case opGetxtimes: - panic("opGetxtimes") - case opExchange: - in := (*exchangeIn)(m.data()) - if m.len() < unsafe.Sizeof(*in) { - goto corrupt - } - oldDirNodeID := NodeID(in.Olddir) - newDirNodeID := NodeID(in.Newdir) - oldNew := m.bytes()[unsafe.Sizeof(*in):] - // oldNew should be "oldname\x00newname\x00" - if len(oldNew) < 4 { - goto corrupt - } - if oldNew[len(oldNew)-1] != '\x00' { - goto corrupt - } - i := bytes.IndexByte(oldNew, '\x00') - if i < 0 { - goto corrupt - } - oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) - req = &ExchangeDataRequest{ - Header: m.Header(), - OldDir: oldDirNodeID, - NewDir: newDirNodeID, - OldName: oldName, - NewName: newName, - // TODO options - } - } - - return req, nil - -corrupt: - Debug(malformedMessage{}) - putMessage(m) - return nil, fmt.Errorf("fuse: malformed message") - -unrecognized: - // Unrecognized message. - // Assume higher-level code will send a "no idea what you mean" error. 
- h := m.Header() - return &h, nil -} - -type bugShortKernelWrite struct { - Written int64 - Length int64 - Error string - Stack string -} - -func (b bugShortKernelWrite) String() string { - return fmt.Sprintf("short kernel write: written=%d/%d error=%q stack=\n%s", b.Written, b.Length, b.Error, b.Stack) -} - -type bugKernelWriteError struct { - Error string - Stack string -} - -func (b bugKernelWriteError) String() string { - return fmt.Sprintf("kernel write error: error=%q stack=\n%s", b.Error, b.Stack) -} - -// safe to call even with nil error -func errorString(err error) string { - if err == nil { - return "" - } - return err.Error() -} - -func (c *Conn) writeToKernel(msg []byte) error { - out := (*outHeader)(unsafe.Pointer(&msg[0])) - out.Len = uint32(len(msg)) - - c.wio.RLock() - defer c.wio.RUnlock() - nn, err := syscall.Write(c.fd(), msg) - if err == nil && nn != len(msg) { - Debug(bugShortKernelWrite{ - Written: int64(nn), - Length: int64(len(msg)), - Error: errorString(err), - Stack: stack(), - }) - } - return err -} - -func (c *Conn) respond(msg []byte) { - if err := c.writeToKernel(msg); err != nil { - Debug(bugKernelWriteError{ - Error: errorString(err), - Stack: stack(), - }) - } -} - -type notCachedError struct{} - -func (notCachedError) Error() string { - return "node not cached" -} - -var _ ErrorNumber = notCachedError{} - -func (notCachedError) Errno() Errno { - // Behave just like if the original syscall.ENOENT had been passed - // straight through. - return ENOENT -} - -var ( - ErrNotCached = notCachedError{} -) - -// sendInvalidate sends an invalidate notification to kernel. -// -// A returned ENOENT is translated to a friendlier error. -func (c *Conn) sendInvalidate(msg []byte) error { - switch err := c.writeToKernel(msg); err { - case syscall.ENOENT: - return ErrNotCached - default: - return err - } -} - -// InvalidateNode invalidates the kernel cache of the attributes and a -// range of the data of a node. -// -// Giving offset 0 and size -1 means all data. To invalidate just the -// attributes, give offset 0 and size 0. -// -// Returns ErrNotCached if the kernel is not currently caching the -// node. -func (c *Conn) InvalidateNode(nodeID NodeID, off int64, size int64) error { - buf := newBuffer(unsafe.Sizeof(notifyInvalInodeOut{})) - h := (*outHeader)(unsafe.Pointer(&buf[0])) - // h.Unique is 0 - h.Error = notifyCodeInvalInode - out := (*notifyInvalInodeOut)(buf.alloc(unsafe.Sizeof(notifyInvalInodeOut{}))) - out.Ino = uint64(nodeID) - out.Off = off - out.Len = size - return c.sendInvalidate(buf) -} - -// InvalidateEntry invalidates the kernel cache of the directory entry -// identified by parent directory node ID and entry basename. -// -// Kernel may or may not cache directory listings. To invalidate -// those, use InvalidateNode to invalidate all of the data for a -// directory. (As of 2015-06, Linux FUSE does not cache directory -// listings.) -// -// Returns ErrNotCached if the kernel is not currently caching the -// node. 
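A sketch of driving these invalidation calls from a file system after an out-of-band change, using the fs.Server helpers defined earlier in this patch; the myfs package, srv, and node are assumptions standing in for the server returned by fs.New and the Node whose data changed:

package myfs

import (
	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// fileChanged asks the kernel to drop cached data for node after it
// was modified behind the kernel's back.
func fileChanged(srv *fs.Server, node fs.Node) error {
	err := srv.InvalidateNodeData(node)
	if err == fuse.ErrNotCached {
		// The kernel had nothing cached; there is nothing to do.
		return nil
	}
	return err
}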
-func (c *Conn) InvalidateEntry(parent NodeID, name string) error { - const maxUint32 = ^uint32(0) - if uint64(len(name)) > uint64(maxUint32) { - // very unlikely, but we don't want to silently truncate - return syscall.ENAMETOOLONG - } - buf := newBuffer(unsafe.Sizeof(notifyInvalEntryOut{}) + uintptr(len(name)) + 1) - h := (*outHeader)(unsafe.Pointer(&buf[0])) - // h.Unique is 0 - h.Error = notifyCodeInvalEntry - out := (*notifyInvalEntryOut)(buf.alloc(unsafe.Sizeof(notifyInvalEntryOut{}))) - out.Parent = uint64(parent) - out.Namelen = uint32(len(name)) - buf = append(buf, name...) - buf = append(buf, '\x00') - return c.sendInvalidate(buf) -} - -// An InitRequest is the first request sent on a FUSE file system. -type InitRequest struct { - Header `json:"-"` - Kernel Protocol - // Maximum readahead in bytes that the kernel plans to use. - MaxReadahead uint32 - Flags InitFlags -} - -var _ = Request(&InitRequest{}) - -func (r *InitRequest) String() string { - return fmt.Sprintf("Init [%v] %v ra=%d fl=%v", &r.Header, r.Kernel, r.MaxReadahead, r.Flags) -} - -// An InitResponse is the response to an InitRequest. -type InitResponse struct { - Library Protocol - // Maximum readahead in bytes that the kernel can use. Ignored if - // greater than InitRequest.MaxReadahead. - MaxReadahead uint32 - Flags InitFlags - // Maximum size of a single write operation. - // Linux enforces a minimum of 4 KiB. - MaxWrite uint32 -} - -func (r *InitResponse) String() string { - return fmt.Sprintf("Init %v ra=%d fl=%v w=%d", r.Library, r.MaxReadahead, r.Flags, r.MaxWrite) -} - -// Respond replies to the request with the given response. -func (r *InitRequest) Respond(resp *InitResponse) { - buf := newBuffer(unsafe.Sizeof(initOut{})) - out := (*initOut)(buf.alloc(unsafe.Sizeof(initOut{}))) - out.Major = resp.Library.Major - out.Minor = resp.Library.Minor - out.MaxReadahead = resp.MaxReadahead - out.Flags = uint32(resp.Flags) - out.MaxWrite = resp.MaxWrite - - // MaxWrite larger than our receive buffer would just lead to - // errors on large writes. - if out.MaxWrite > maxWrite { - out.MaxWrite = maxWrite - } - r.respond(buf) -} - -// A StatfsRequest requests information about the mounted file system. -type StatfsRequest struct { - Header `json:"-"` -} - -var _ = Request(&StatfsRequest{}) - -func (r *StatfsRequest) String() string { - return fmt.Sprintf("Statfs [%s]", &r.Header) -} - -// Respond replies to the request with the given response. -func (r *StatfsRequest) Respond(resp *StatfsResponse) { - buf := newBuffer(unsafe.Sizeof(statfsOut{})) - out := (*statfsOut)(buf.alloc(unsafe.Sizeof(statfsOut{}))) - out.St = kstatfs{ - Blocks: resp.Blocks, - Bfree: resp.Bfree, - Bavail: resp.Bavail, - Files: resp.Files, - Ffree: resp.Ffree, - Bsize: resp.Bsize, - Namelen: resp.Namelen, - Frsize: resp.Frsize, - } - r.respond(buf) -} - -// A StatfsResponse is the response to a StatfsRequest. -type StatfsResponse struct { - Blocks uint64 // Total data blocks in file system. - Bfree uint64 // Free blocks in file system. - Bavail uint64 // Free blocks in file system if you're not root. - Files uint64 // Total files in file system. - Ffree uint64 // Free files in file system. - Bsize uint32 // Block size - Namelen uint32 // Maximum file name length? - Frsize uint32 // Fragment size, smallest addressable data size in the file system. 
-} - -func (r *StatfsResponse) String() string { - return fmt.Sprintf("Statfs blocks=%d/%d/%d files=%d/%d bsize=%d frsize=%d namelen=%d", - r.Bavail, r.Bfree, r.Blocks, - r.Ffree, r.Files, - r.Bsize, - r.Frsize, - r.Namelen, - ) -} - -// An AccessRequest asks whether the file can be accessed -// for the purpose specified by the mask. -type AccessRequest struct { - Header `json:"-"` - Mask uint32 -} - -var _ = Request(&AccessRequest{}) - -func (r *AccessRequest) String() string { - return fmt.Sprintf("Access [%s] mask=%#x", &r.Header, r.Mask) -} - -// Respond replies to the request indicating that access is allowed. -// To deny access, use RespondError. -func (r *AccessRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// An Attr is the metadata for a single file or directory. -type Attr struct { - Valid time.Duration // how long Attr can be cached - - Inode uint64 // inode number - Size uint64 // size in bytes - Blocks uint64 // size in 512-byte units - Atime time.Time // time of last access - Mtime time.Time // time of last modification - Ctime time.Time // time of last inode change - Crtime time.Time // time of creation (OS X only) - Mode os.FileMode // file mode - Nlink uint32 // number of links (usually 1) - Uid uint32 // owner uid - Gid uint32 // group gid - Rdev uint32 // device numbers - Flags uint32 // chflags(2) flags (OS X only) - BlockSize uint32 // preferred blocksize for filesystem I/O -} - -func (a Attr) String() string { - return fmt.Sprintf("valid=%v ino=%v size=%d mode=%v", a.Valid, a.Inode, a.Size, a.Mode) -} - -func unix(t time.Time) (sec uint64, nsec uint32) { - nano := t.UnixNano() - sec = uint64(nano / 1e9) - nsec = uint32(nano % 1e9) - return -} - -func (a *Attr) attr(out *attr, proto Protocol) { - out.Ino = a.Inode - out.Size = a.Size - out.Blocks = a.Blocks - out.Atime, out.AtimeNsec = unix(a.Atime) - out.Mtime, out.MtimeNsec = unix(a.Mtime) - out.Ctime, out.CtimeNsec = unix(a.Ctime) - out.SetCrtime(unix(a.Crtime)) - out.Mode = uint32(a.Mode) & 0777 - switch { - default: - out.Mode |= syscall.S_IFREG - case a.Mode&os.ModeDir != 0: - out.Mode |= syscall.S_IFDIR - case a.Mode&os.ModeDevice != 0: - if a.Mode&os.ModeCharDevice != 0 { - out.Mode |= syscall.S_IFCHR - } else { - out.Mode |= syscall.S_IFBLK - } - case a.Mode&os.ModeNamedPipe != 0: - out.Mode |= syscall.S_IFIFO - case a.Mode&os.ModeSymlink != 0: - out.Mode |= syscall.S_IFLNK - case a.Mode&os.ModeSocket != 0: - out.Mode |= syscall.S_IFSOCK - } - if a.Mode&os.ModeSetuid != 0 { - out.Mode |= syscall.S_ISUID - } - if a.Mode&os.ModeSetgid != 0 { - out.Mode |= syscall.S_ISGID - } - out.Nlink = a.Nlink - out.Uid = a.Uid - out.Gid = a.Gid - out.Rdev = a.Rdev - out.SetFlags(a.Flags) - if proto.GE(Protocol{7, 9}) { - out.Blksize = a.BlockSize - } - - return -} - -// A GetattrRequest asks for the metadata for the file denoted by r.Node. -type GetattrRequest struct { - Header `json:"-"` - Flags GetattrFlags - Handle HandleID -} - -var _ = Request(&GetattrRequest{}) - -func (r *GetattrRequest) String() string { - return fmt.Sprintf("Getattr [%s] %v fl=%v", &r.Header, r.Handle, r.Flags) -} - -// Respond replies to the request with the given response. 
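Before the Respond implementation below, a sketch of the producing side: what a Getattr handler typically fills into Attr. All numbers here are placeholders; Valid is the field with protocol-level weight, since it bounds how long the kernel may reuse the answer without asking again:

    package fssketch

    import (
        "time"

        "bazil.org/fuse"
    )

    // regularFileAttr builds the metadata for an ordinary 4 KiB file.
    // attr() above folds this Mode into the S_IFREG branch of its
    // switch, because none of the type bits (ModeDir, ModeSymlink,
    // ...) are set.
    func regularFileAttr() fuse.Attr {
        return fuse.Attr{
            Valid:     time.Minute, // kernel may cache this for 60s
            Inode:     42,
            Size:      4096,
            Blocks:    8, // 512-byte units
            Mtime:     time.Now(),
            Mode:      0644,
            Nlink:     1,
            BlockSize: 4096, // only transmitted for protocol >= 7.9
        }
    }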
-func (r *GetattrRequest) Respond(resp *GetattrResponse) { - size := attrOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*attrOut)(buf.alloc(size)) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A GetattrResponse is the response to a GetattrRequest. -type GetattrResponse struct { - Attr Attr // file attributes -} - -func (r *GetattrResponse) String() string { - return fmt.Sprintf("Getattr %v", r.Attr) -} - -// A GetxattrRequest asks for the extended attributes associated with r.Node. -type GetxattrRequest struct { - Header `json:"-"` - - // Maximum size to return. - Size uint32 - - // Name of the attribute requested. - Name string - - // Offset within extended attributes. - // - // Only valid for OS X, and then only with the resource fork - // attribute. - Position uint32 -} - -var _ = Request(&GetxattrRequest{}) - -func (r *GetxattrRequest) String() string { - return fmt.Sprintf("Getxattr [%s] %q %d @%d", &r.Header, r.Name, r.Size, r.Position) -} - -// Respond replies to the request with the given response. -func (r *GetxattrRequest) Respond(resp *GetxattrResponse) { - if r.Size == 0 { - buf := newBuffer(unsafe.Sizeof(getxattrOut{})) - out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) - out.Size = uint32(len(resp.Xattr)) - r.respond(buf) - } else { - buf := newBuffer(uintptr(len(resp.Xattr))) - buf = append(buf, resp.Xattr...) - r.respond(buf) - } -} - -// A GetxattrResponse is the response to a GetxattrRequest. -type GetxattrResponse struct { - Xattr []byte -} - -func (r *GetxattrResponse) String() string { - return fmt.Sprintf("Getxattr %x", r.Xattr) -} - -// A ListxattrRequest asks to list the extended attributes associated with r.Node. -type ListxattrRequest struct { - Header `json:"-"` - Size uint32 // maximum size to return - Position uint32 // offset within attribute list -} - -var _ = Request(&ListxattrRequest{}) - -func (r *ListxattrRequest) String() string { - return fmt.Sprintf("Listxattr [%s] %d @%d", &r.Header, r.Size, r.Position) -} - -// Respond replies to the request with the given response. -func (r *ListxattrRequest) Respond(resp *ListxattrResponse) { - if r.Size == 0 { - buf := newBuffer(unsafe.Sizeof(getxattrOut{})) - out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) - out.Size = uint32(len(resp.Xattr)) - r.respond(buf) - } else { - buf := newBuffer(uintptr(len(resp.Xattr))) - buf = append(buf, resp.Xattr...) - r.respond(buf) - } -} - -// A ListxattrResponse is the response to a ListxattrRequest. -type ListxattrResponse struct { - Xattr []byte -} - -func (r *ListxattrResponse) String() string { - return fmt.Sprintf("Listxattr %x", r.Xattr) -} - -// Append adds an extended attribute name to the response. -func (r *ListxattrResponse) Append(names ...string) { - for _, name := range names { - r.Xattr = append(r.Xattr, name...) - r.Xattr = append(r.Xattr, '\x00') - } -} - -// A RemovexattrRequest asks to remove an extended attribute associated with r.Node. -type RemovexattrRequest struct { - Header `json:"-"` - Name string // name of extended attribute -} - -var _ = Request(&RemovexattrRequest{}) - -func (r *RemovexattrRequest) String() string { - return fmt.Sprintf("Removexattr [%s] %q", &r.Header, r.Name) -} - -// Respond replies to the request, indicating that the attribute was removed. 
-func (r *RemovexattrRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A SetxattrRequest asks to set an extended attribute associated with a file. -type SetxattrRequest struct { - Header `json:"-"` - - // Flags can make the request fail if attribute does/not already - // exist. Unfortunately, the constants are platform-specific and - // not exposed by Go1.2. Look for XATTR_CREATE, XATTR_REPLACE. - // - // TODO improve this later - // - // TODO XATTR_CREATE and exist -> EEXIST - // - // TODO XATTR_REPLACE and not exist -> ENODATA - Flags uint32 - - // Offset within extended attributes. - // - // Only valid for OS X, and then only with the resource fork - // attribute. - Position uint32 - - Name string - Xattr []byte -} - -var _ = Request(&SetxattrRequest{}) - -func trunc(b []byte, max int) ([]byte, string) { - if len(b) > max { - return b[:max], "..." - } - return b, "" -} - -func (r *SetxattrRequest) String() string { - xattr, tail := trunc(r.Xattr, 16) - return fmt.Sprintf("Setxattr [%s] %q %x%s fl=%v @%#x", &r.Header, r.Name, xattr, tail, r.Flags, r.Position) -} - -// Respond replies to the request, indicating that the extended attribute was set. -func (r *SetxattrRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A LookupRequest asks to look up the given name in the directory named by r.Node. -type LookupRequest struct { - Header `json:"-"` - Name string -} - -var _ = Request(&LookupRequest{}) - -func (r *LookupRequest) String() string { - return fmt.Sprintf("Lookup [%s] %q", &r.Header, r.Name) -} - -// Respond replies to the request with the given response. -func (r *LookupRequest) Respond(resp *LookupResponse) { - size := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*entryOut)(buf.alloc(size)) - out.Nodeid = uint64(resp.Node) - out.Generation = resp.Generation - out.EntryValid = uint64(resp.EntryValid / time.Second) - out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A LookupResponse is the response to a LookupRequest. -type LookupResponse struct { - Node NodeID - Generation uint64 - EntryValid time.Duration - Attr Attr -} - -func (r *LookupResponse) string() string { - return fmt.Sprintf("%v gen=%d valid=%v attr={%v}", r.Node, r.Generation, r.EntryValid, r.Attr) -} - -func (r *LookupResponse) String() string { - return fmt.Sprintf("Lookup %s", r.string()) -} - -// An OpenRequest asks to open a file or directory -type OpenRequest struct { - Header `json:"-"` - Dir bool // is this Opendir? - Flags OpenFlags -} - -var _ = Request(&OpenRequest{}) - -func (r *OpenRequest) String() string { - return fmt.Sprintf("Open [%s] dir=%v fl=%v", &r.Header, r.Dir, r.Flags) -} - -// Respond replies to the request with the given response. -func (r *OpenRequest) Respond(resp *OpenResponse) { - buf := newBuffer(unsafe.Sizeof(openOut{})) - out := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) - out.Fh = uint64(resp.Handle) - out.OpenFlags = uint32(resp.Flags) - r.respond(buf) -} - -// A OpenResponse is the response to a OpenRequest. 
-type OpenResponse struct { - Handle HandleID - Flags OpenResponseFlags -} - -func (r *OpenResponse) string() string { - return fmt.Sprintf("%v fl=%v", r.Handle, r.Flags) -} - -func (r *OpenResponse) String() string { - return fmt.Sprintf("Open %s", r.string()) -} - -// A CreateRequest asks to create and open a file (not a directory). -type CreateRequest struct { - Header `json:"-"` - Name string - Flags OpenFlags - Mode os.FileMode - // Umask of the request. Not supported on OS X. - Umask os.FileMode -} - -var _ = Request(&CreateRequest{}) - -func (r *CreateRequest) String() string { - return fmt.Sprintf("Create [%s] %q fl=%v mode=%v umask=%v", &r.Header, r.Name, r.Flags, r.Mode, r.Umask) -} - -// Respond replies to the request with the given response. -func (r *CreateRequest) Respond(resp *CreateResponse) { - eSize := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(eSize + unsafe.Sizeof(openOut{})) - - e := (*entryOut)(buf.alloc(eSize)) - e.Nodeid = uint64(resp.Node) - e.Generation = resp.Generation - e.EntryValid = uint64(resp.EntryValid / time.Second) - e.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - e.AttrValid = uint64(resp.Attr.Valid / time.Second) - e.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&e.Attr, r.Header.Conn.proto) - - o := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) - o.Fh = uint64(resp.Handle) - o.OpenFlags = uint32(resp.Flags) - - r.respond(buf) -} - -// A CreateResponse is the response to a CreateRequest. -// It describes the created node and opened handle. -type CreateResponse struct { - LookupResponse - OpenResponse -} - -func (r *CreateResponse) String() string { - return fmt.Sprintf("Create {%s} {%s}", r.LookupResponse.string(), r.OpenResponse.string()) -} - -// A MkdirRequest asks to create (but not open) a directory. -type MkdirRequest struct { - Header `json:"-"` - Name string - Mode os.FileMode - // Umask of the request. Not supported on OS X. - Umask os.FileMode -} - -var _ = Request(&MkdirRequest{}) - -func (r *MkdirRequest) String() string { - return fmt.Sprintf("Mkdir [%s] %q mode=%v umask=%v", &r.Header, r.Name, r.Mode, r.Umask) -} - -// Respond replies to the request with the given response. -func (r *MkdirRequest) Respond(resp *MkdirResponse) { - size := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*entryOut)(buf.alloc(size)) - out.Nodeid = uint64(resp.Node) - out.Generation = resp.Generation - out.EntryValid = uint64(resp.EntryValid / time.Second) - out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A MkdirResponse is the response to a MkdirRequest. -type MkdirResponse struct { - LookupResponse -} - -func (r *MkdirResponse) String() string { - return fmt.Sprintf("Mkdir %v", r.LookupResponse.string()) -} - -// A ReadRequest asks to read from an open file. -type ReadRequest struct { - Header `json:"-"` - Dir bool // is this Readdir? 
- Handle HandleID - Offset int64 - Size int - Flags ReadFlags - LockOwner uint64 - FileFlags OpenFlags -} - -var _ = Request(&ReadRequest{}) - -func (r *ReadRequest) String() string { - return fmt.Sprintf("Read [%s] %v %d @%#x dir=%v fl=%v lock=%d ffl=%v", &r.Header, r.Handle, r.Size, r.Offset, r.Dir, r.Flags, r.LockOwner, r.FileFlags) -} - -// Respond replies to the request with the given response. -func (r *ReadRequest) Respond(resp *ReadResponse) { - buf := newBuffer(uintptr(len(resp.Data))) - buf = append(buf, resp.Data...) - r.respond(buf) -} - -// A ReadResponse is the response to a ReadRequest. -type ReadResponse struct { - Data []byte -} - -func (r *ReadResponse) String() string { - return fmt.Sprintf("Read %d", len(r.Data)) -} - -type jsonReadResponse struct { - Len uint64 -} - -func (r *ReadResponse) MarshalJSON() ([]byte, error) { - j := jsonReadResponse{ - Len: uint64(len(r.Data)), - } - return json.Marshal(j) -} - -// A ReleaseRequest asks to release (close) an open file handle. -type ReleaseRequest struct { - Header `json:"-"` - Dir bool // is this Releasedir? - Handle HandleID - Flags OpenFlags // flags from OpenRequest - ReleaseFlags ReleaseFlags - LockOwner uint32 -} - -var _ = Request(&ReleaseRequest{}) - -func (r *ReleaseRequest) String() string { - return fmt.Sprintf("Release [%s] %v fl=%v rfl=%v owner=%#x", &r.Header, r.Handle, r.Flags, r.ReleaseFlags, r.LockOwner) -} - -// Respond replies to the request, indicating that the handle has been released. -func (r *ReleaseRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A DestroyRequest is sent by the kernel when unmounting the file system. -// No more requests will be received after this one, but it should still be -// responded to. -type DestroyRequest struct { - Header `json:"-"` -} - -var _ = Request(&DestroyRequest{}) - -func (r *DestroyRequest) String() string { - return fmt.Sprintf("Destroy [%s]", &r.Header) -} - -// Respond replies to the request. -func (r *DestroyRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A ForgetRequest is sent by the kernel when forgetting about r.Node -// as returned by r.N lookup requests. -type ForgetRequest struct { - Header `json:"-"` - N uint64 -} - -var _ = Request(&ForgetRequest{}) - -func (r *ForgetRequest) String() string { - return fmt.Sprintf("Forget [%s] %d", &r.Header, r.N) -} - -// Respond replies to the request, indicating that the forgetfulness has been recorded. -func (r *ForgetRequest) Respond() { - // Don't reply to forget messages. - r.noResponse() -} - -// A Dirent represents a single directory entry. -type Dirent struct { - // Inode this entry names. - Inode uint64 - - // Type of the entry, for example DT_File. - // - // Setting this is optional. The zero value (DT_Unknown) means - // callers will just need to do a Getattr when the type is - // needed. Providing a type can speed up operations - // significantly. - Type DirentType - - // Name of the entry - Name string -} - -// Type of an entry in a directory listing. -type DirentType uint32 - -const ( - // These don't quite match os.FileMode; especially there's an - // explicit unknown, instead of zero value meaning file. They - // are also not quite syscall.DT_*; nothing says the FUSE - // protocol follows those, and even if they were, we don't - // want each fs to fiddle with syscall. - - // The shift by 12 is hardcoded in the FUSE userspace - // low-level C library, so it's safe here. 
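A sketch of how a directory listing is produced with these pieces, using the DirentType constants defined just below; inode numbers and names are invented, and the offset/size windowing mirrors what fuseutil.HandleRead does for file content:

    package fssketch

    import "bazil.org/fuse"

    // listDir answers a Readdir-flavored ReadRequest: entries are
    // encoded into one buffer, then the kernel's requested window is
    // sliced out of it.
    func listDir(req *fuse.ReadRequest) {
        var data []byte
        data = fuse.AppendDirent(data, fuse.Dirent{Inode: 2, Type: fuse.DT_Dir, Name: "subdir"})
        data = fuse.AppendDirent(data, fuse.Dirent{Inode: 3, Type: fuse.DT_File, Name: "notes.txt"})

        if req.Offset >= int64(len(data)) {
            data = nil
        } else {
            data = data[req.Offset:]
        }
        if len(data) > req.Size {
            data = data[:req.Size]
        }
        req.Respond(&fuse.ReadResponse{Data: data})
    }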
- - DT_Unknown DirentType = 0 - DT_Socket DirentType = syscall.S_IFSOCK >> 12 - DT_Link DirentType = syscall.S_IFLNK >> 12 - DT_File DirentType = syscall.S_IFREG >> 12 - DT_Block DirentType = syscall.S_IFBLK >> 12 - DT_Dir DirentType = syscall.S_IFDIR >> 12 - DT_Char DirentType = syscall.S_IFCHR >> 12 - DT_FIFO DirentType = syscall.S_IFIFO >> 12 -) - -func (t DirentType) String() string { - switch t { - case DT_Unknown: - return "unknown" - case DT_Socket: - return "socket" - case DT_Link: - return "link" - case DT_File: - return "file" - case DT_Block: - return "block" - case DT_Dir: - return "dir" - case DT_Char: - return "char" - case DT_FIFO: - return "fifo" - } - return "invalid" -} - -// AppendDirent appends the encoded form of a directory entry to data -// and returns the resulting slice. -func AppendDirent(data []byte, dir Dirent) []byte { - de := dirent{ - Ino: dir.Inode, - Namelen: uint32(len(dir.Name)), - Type: uint32(dir.Type), - } - de.Off = uint64(len(data) + direntSize + (len(dir.Name)+7)&^7) - data = append(data, (*[direntSize]byte)(unsafe.Pointer(&de))[:]...) - data = append(data, dir.Name...) - n := direntSize + uintptr(len(dir.Name)) - if n%8 != 0 { - var pad [8]byte - data = append(data, pad[:8-n%8]...) - } - return data -} - -// A WriteRequest asks to write to an open file. -type WriteRequest struct { - Header - Handle HandleID - Offset int64 - Data []byte - Flags WriteFlags - LockOwner uint64 - FileFlags OpenFlags -} - -var _ = Request(&WriteRequest{}) - -func (r *WriteRequest) String() string { - return fmt.Sprintf("Write [%s] %v %d @%d fl=%v lock=%d ffl=%v", &r.Header, r.Handle, len(r.Data), r.Offset, r.Flags, r.LockOwner, r.FileFlags) -} - -type jsonWriteRequest struct { - Handle HandleID - Offset int64 - Len uint64 - Flags WriteFlags -} - -func (r *WriteRequest) MarshalJSON() ([]byte, error) { - j := jsonWriteRequest{ - Handle: r.Handle, - Offset: r.Offset, - Len: uint64(len(r.Data)), - Flags: r.Flags, - } - return json.Marshal(j) -} - -// Respond replies to the request with the given response. -func (r *WriteRequest) Respond(resp *WriteResponse) { - buf := newBuffer(unsafe.Sizeof(writeOut{})) - out := (*writeOut)(buf.alloc(unsafe.Sizeof(writeOut{}))) - out.Size = uint32(resp.Size) - r.respond(buf) -} - -// A WriteResponse replies to a write indicating how many bytes were written. -type WriteResponse struct { - Size int -} - -func (r *WriteResponse) String() string { - return fmt.Sprintf("Write %d", r.Size) -} - -// A SetattrRequest asks to change one or more attributes associated with a file, -// as indicated by Valid. 
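The offset bookkeeping in AppendDirent relies on 8-byte alignment: (len(dir.Name)+7)&^7 rounds the name up to the next multiple of eight, and the pad slice appends the matching zero bytes. Since the fixed 24-byte header is itself a multiple of eight, rounding the whole record gives the same answer; a worked example:

    package main

    import "fmt"

    // direntSize matches the constant above: Ino (8) + Off (8) +
    // Namelen (4) + Type (4) = 24 bytes of fixed header.
    const direntSize = 24

    func main() {
        name := "notes.txt" // 9 bytes
        n := direntSize + len(name)
        padded := direntSize + (len(name)+7)&^7 // &^7 clears the low 3 bits
        fmt.Println(n, padded)                  // 33 40: three zero bytes of padding
    }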
-type SetattrRequest struct { - Header `json:"-"` - Valid SetattrValid - Handle HandleID - Size uint64 - Atime time.Time - Mtime time.Time - Mode os.FileMode - Uid uint32 - Gid uint32 - - // OS X only - Bkuptime time.Time - Chgtime time.Time - Crtime time.Time - Flags uint32 // see chflags(2) -} - -var _ = Request(&SetattrRequest{}) - -func (r *SetattrRequest) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "Setattr [%s]", &r.Header) - if r.Valid.Mode() { - fmt.Fprintf(&buf, " mode=%v", r.Mode) - } - if r.Valid.Uid() { - fmt.Fprintf(&buf, " uid=%d", r.Uid) - } - if r.Valid.Gid() { - fmt.Fprintf(&buf, " gid=%d", r.Gid) - } - if r.Valid.Size() { - fmt.Fprintf(&buf, " size=%d", r.Size) - } - if r.Valid.Atime() { - fmt.Fprintf(&buf, " atime=%v", r.Atime) - } - if r.Valid.AtimeNow() { - fmt.Fprintf(&buf, " atime=now") - } - if r.Valid.Mtime() { - fmt.Fprintf(&buf, " mtime=%v", r.Mtime) - } - if r.Valid.MtimeNow() { - fmt.Fprintf(&buf, " mtime=now") - } - if r.Valid.Handle() { - fmt.Fprintf(&buf, " handle=%v", r.Handle) - } else { - fmt.Fprintf(&buf, " handle=INVALID-%v", r.Handle) - } - if r.Valid.LockOwner() { - fmt.Fprintf(&buf, " lockowner") - } - if r.Valid.Crtime() { - fmt.Fprintf(&buf, " crtime=%v", r.Crtime) - } - if r.Valid.Chgtime() { - fmt.Fprintf(&buf, " chgtime=%v", r.Chgtime) - } - if r.Valid.Bkuptime() { - fmt.Fprintf(&buf, " bkuptime=%v", r.Bkuptime) - } - if r.Valid.Flags() { - fmt.Fprintf(&buf, " flags=%v", r.Flags) - } - return buf.String() -} - -// Respond replies to the request with the given response, -// giving the updated attributes. -func (r *SetattrRequest) Respond(resp *SetattrResponse) { - size := attrOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*attrOut)(buf.alloc(size)) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A SetattrResponse is the response to a SetattrRequest. -type SetattrResponse struct { - Attr Attr // file attributes -} - -func (r *SetattrResponse) String() string { - return fmt.Sprintf("Setattr %v", r.Attr) -} - -// A FlushRequest asks for the current state of an open file to be flushed -// to storage, as when a file descriptor is being closed. A single opened Handle -// may receive multiple FlushRequests over its lifetime. -type FlushRequest struct { - Header `json:"-"` - Handle HandleID - Flags uint32 - LockOwner uint64 -} - -var _ = Request(&FlushRequest{}) - -func (r *FlushRequest) String() string { - return fmt.Sprintf("Flush [%s] %v fl=%#x lk=%#x", &r.Header, r.Handle, r.Flags, r.LockOwner) -} - -// Respond replies to the request, indicating that the flush succeeded. -func (r *FlushRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A RemoveRequest asks to remove a file or directory from the -// directory r.Node. -type RemoveRequest struct { - Header `json:"-"` - Name string // name of the entry to remove - Dir bool // is this rmdir? -} - -var _ = Request(&RemoveRequest{}) - -func (r *RemoveRequest) String() string { - return fmt.Sprintf("Remove [%s] %q dir=%v", &r.Header, r.Name, r.Dir) -} - -// Respond replies to the request, indicating that the file was removed. -func (r *RemoveRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// A SymlinkRequest is a request to create a symlink making NewName point to Target. 
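A sketch of the consuming side of SetattrRequest: only fields whose Valid bit is set carry data, so a handler checks each accessor before touching the corresponding field. The in-memory attr parameter is a stand-in for whatever state a real filesystem mutates:

    package fssketch

    import "bazil.org/fuse"

    // applySetattr copies the requested changes into a, then reports
    // the resulting attributes back, as Respond above requires.
    func applySetattr(req *fuse.SetattrRequest, a *fuse.Attr) {
        if req.Valid.Size() {
            a.Size = req.Size
        }
        if req.Valid.Mode() {
            a.Mode = req.Mode
        }
        if req.Valid.Mtime() {
            a.Mtime = req.Mtime
        }
        // Uid, Gid, Atime and the OS X-only fields follow the same pattern.
        req.Respond(&fuse.SetattrResponse{Attr: *a})
    }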
-type SymlinkRequest struct { - Header `json:"-"` - NewName, Target string -} - -var _ = Request(&SymlinkRequest{}) - -func (r *SymlinkRequest) String() string { - return fmt.Sprintf("Symlink [%s] from %q to target %q", &r.Header, r.NewName, r.Target) -} - -// Respond replies to the request, indicating that the symlink was created. -func (r *SymlinkRequest) Respond(resp *SymlinkResponse) { - size := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*entryOut)(buf.alloc(size)) - out.Nodeid = uint64(resp.Node) - out.Generation = resp.Generation - out.EntryValid = uint64(resp.EntryValid / time.Second) - out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A SymlinkResponse is the response to a SymlinkRequest. -type SymlinkResponse struct { - LookupResponse -} - -func (r *SymlinkResponse) String() string { - return fmt.Sprintf("Symlink %v", r.LookupResponse.string()) -} - -// A ReadlinkRequest is a request to read a symlink's target. -type ReadlinkRequest struct { - Header `json:"-"` -} - -var _ = Request(&ReadlinkRequest{}) - -func (r *ReadlinkRequest) String() string { - return fmt.Sprintf("Readlink [%s]", &r.Header) -} - -func (r *ReadlinkRequest) Respond(target string) { - buf := newBuffer(uintptr(len(target))) - buf = append(buf, target...) - r.respond(buf) -} - -// A LinkRequest is a request to create a hard link. -type LinkRequest struct { - Header `json:"-"` - OldNode NodeID - NewName string -} - -var _ = Request(&LinkRequest{}) - -func (r *LinkRequest) String() string { - return fmt.Sprintf("Link [%s] node %d to %q", &r.Header, r.OldNode, r.NewName) -} - -func (r *LinkRequest) Respond(resp *LookupResponse) { - size := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*entryOut)(buf.alloc(size)) - out.Nodeid = uint64(resp.Node) - out.Generation = resp.Generation - out.EntryValid = uint64(resp.EntryValid / time.Second) - out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -// A RenameRequest is a request to rename a file. -type RenameRequest struct { - Header `json:"-"` - NewDir NodeID - OldName, NewName string -} - -var _ = Request(&RenameRequest{}) - -func (r *RenameRequest) String() string { - return fmt.Sprintf("Rename [%s] from %q to dirnode %v %q", &r.Header, r.OldName, r.NewDir, r.NewName) -} - -func (r *RenameRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -type MknodRequest struct { - Header `json:"-"` - Name string - Mode os.FileMode - Rdev uint32 - // Umask of the request. Not supported on OS X. 
- Umask os.FileMode -} - -var _ = Request(&MknodRequest{}) - -func (r *MknodRequest) String() string { - return fmt.Sprintf("Mknod [%s] Name %q mode=%v umask=%v rdev=%d", &r.Header, r.Name, r.Mode, r.Umask, r.Rdev) -} - -func (r *MknodRequest) Respond(resp *LookupResponse) { - size := entryOutSize(r.Header.Conn.proto) - buf := newBuffer(size) - out := (*entryOut)(buf.alloc(size)) - out.Nodeid = uint64(resp.Node) - out.Generation = resp.Generation - out.EntryValid = uint64(resp.EntryValid / time.Second) - out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) - out.AttrValid = uint64(resp.Attr.Valid / time.Second) - out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) - resp.Attr.attr(&out.Attr, r.Header.Conn.proto) - r.respond(buf) -} - -type FsyncRequest struct { - Header `json:"-"` - Handle HandleID - // TODO bit 1 is datasync, not well documented upstream - Flags uint32 - Dir bool -} - -var _ = Request(&FsyncRequest{}) - -func (r *FsyncRequest) String() string { - return fmt.Sprintf("Fsync [%s] Handle %v Flags %v", &r.Header, r.Handle, r.Flags) -} - -func (r *FsyncRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} - -// An InterruptRequest is a request to interrupt another pending request. The -// response to that request should return an error status of EINTR. -type InterruptRequest struct { - Header `json:"-"` - IntrID RequestID // ID of the request to be interrupt. -} - -var _ = Request(&InterruptRequest{}) - -func (r *InterruptRequest) Respond() { - // nothing to do here - r.noResponse() -} - -func (r *InterruptRequest) String() string { - return fmt.Sprintf("Interrupt [%s] ID %v", &r.Header, r.IntrID) -} - -// An ExchangeDataRequest is a request to exchange the contents of two -// files, while leaving most metadata untouched. -// -// This request comes from OS X exchangedata(2) and represents its -// specific semantics. Crucially, it is very different from Linux -// renameat(2) RENAME_EXCHANGE. -// -// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html -type ExchangeDataRequest struct { - Header `json:"-"` - OldDir, NewDir NodeID - OldName, NewName string - // TODO options -} - -var _ = Request(&ExchangeDataRequest{}) - -func (r *ExchangeDataRequest) String() string { - // TODO options - return fmt.Sprintf("ExchangeData [%s] %v %q and %v %q", &r.Header, r.OldDir, r.OldName, r.NewDir, r.NewName) -} - -func (r *ExchangeDataRequest) Respond() { - buf := newBuffer(0) - r.respond(buf) -} diff --git a/vendor/bazil.org/fuse/fuse_darwin.go b/vendor/bazil.org/fuse/fuse_darwin.go deleted file mode 100644 index b58dca97d..000000000 --- a/vendor/bazil.org/fuse/fuse_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -package fuse - -// Maximum file write size we are prepared to receive from the kernel. -// -// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will -// forcibly close the /dev/fuse file descriptor on a Setxattr with a -// 16MB value. See TestSetxattr16MB and -// https://github.com/bazil/fuse/issues/42 -const maxWrite = 16 * 1024 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_freebsd.go b/vendor/bazil.org/fuse/fuse_freebsd.go deleted file mode 100644 index 4aa83a0d4..000000000 --- a/vendor/bazil.org/fuse/fuse_freebsd.go +++ /dev/null @@ -1,6 +0,0 @@ -package fuse - -// Maximum file write size we are prepared to receive from the kernel. -// -// This number is just a guess. 
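Each platform file pins this same constant, and it is what the Init exchange shown earlier clamps InitResponse.MaxWrite against. A sketch of the responding side; the protocol version and MaxWrite choice are illustrative:

    package fssketch

    import "bazil.org/fuse"

    // answerInit replies to the kernel's opening InitRequest. Whatever
    // MaxWrite we ask for is silently capped to the per-platform
    // maxWrite constant, so over-asking is harmless.
    func answerInit(req *fuse.InitRequest) {
        req.Respond(&fuse.InitResponse{
            Library:      fuse.Protocol{Major: 7, Minor: 12},
            MaxReadahead: req.MaxReadahead, // accept the kernel's offer
            MaxWrite:     1 << 20,          // 1 MiB requested; capped as needed
        })
    }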
-const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_kernel.go b/vendor/bazil.org/fuse/fuse_kernel.go deleted file mode 100644 index 87c5ca1dc..000000000 --- a/vendor/bazil.org/fuse/fuse_kernel.go +++ /dev/null @@ -1,774 +0,0 @@ -// See the file LICENSE for copyright and licensing information. - -// Derived from FUSE's fuse_kernel.h, which carries this notice: -/* - This file defines the kernel interface of FUSE - Copyright (C) 2001-2007 Miklos Szeredi - - - This -- and only this -- header file may also be distributed under - the terms of the BSD Licence as follows: - - Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - SUCH DAMAGE. -*/ - -package fuse - -import ( - "fmt" - "syscall" - "unsafe" -) - -// The FUSE version implemented by the package. -const ( - protoVersionMinMajor = 7 - protoVersionMinMinor = 8 - protoVersionMaxMajor = 7 - protoVersionMaxMinor = 12 -) - -const ( - rootID = 1 -) - -type kstatfs struct { - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Bsize uint32 - Namelen uint32 - Frsize uint32 - _ uint32 - Spare [6]uint32 -} - -type fileLock struct { - Start uint64 - End uint64 - Type uint32 - Pid uint32 -} - -// GetattrFlags are bit flags that can be seen in GetattrRequest. -type GetattrFlags uint32 - -const ( - // Indicates the handle is valid. - GetattrFh GetattrFlags = 1 << 0 -) - -var getattrFlagsNames = []flagName{ - {uint32(GetattrFh), "GetattrFh"}, -} - -func (fl GetattrFlags) String() string { - return flagString(uint32(fl), getattrFlagsNames) -} - -// The SetattrValid are bit flags describing which fields in the SetattrRequest -// are included in the change. -type SetattrValid uint32 - -const ( - SetattrMode SetattrValid = 1 << 0 - SetattrUid SetattrValid = 1 << 1 - SetattrGid SetattrValid = 1 << 2 - SetattrSize SetattrValid = 1 << 3 - SetattrAtime SetattrValid = 1 << 4 - SetattrMtime SetattrValid = 1 << 5 - SetattrHandle SetattrValid = 1 << 6 - - // Linux only(?) 
- SetattrAtimeNow SetattrValid = 1 << 7 - SetattrMtimeNow SetattrValid = 1 << 8 - SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html - - // OS X only - SetattrCrtime SetattrValid = 1 << 28 - SetattrChgtime SetattrValid = 1 << 29 - SetattrBkuptime SetattrValid = 1 << 30 - SetattrFlags SetattrValid = 1 << 31 -) - -func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 } -func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 } -func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 } -func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 } -func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 } -func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 } -func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 } -func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 } -func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 } -func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 } -func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 } -func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 } -func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 } -func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 } - -func (fl SetattrValid) String() string { - return flagString(uint32(fl), setattrValidNames) -} - -var setattrValidNames = []flagName{ - {uint32(SetattrMode), "SetattrMode"}, - {uint32(SetattrUid), "SetattrUid"}, - {uint32(SetattrGid), "SetattrGid"}, - {uint32(SetattrSize), "SetattrSize"}, - {uint32(SetattrAtime), "SetattrAtime"}, - {uint32(SetattrMtime), "SetattrMtime"}, - {uint32(SetattrHandle), "SetattrHandle"}, - {uint32(SetattrAtimeNow), "SetattrAtimeNow"}, - {uint32(SetattrMtimeNow), "SetattrMtimeNow"}, - {uint32(SetattrLockOwner), "SetattrLockOwner"}, - {uint32(SetattrCrtime), "SetattrCrtime"}, - {uint32(SetattrChgtime), "SetattrChgtime"}, - {uint32(SetattrBkuptime), "SetattrBkuptime"}, - {uint32(SetattrFlags), "SetattrFlags"}, -} - -// Flags that can be seen in OpenRequest.Flags. -const ( - // Access modes. These are not 1-bit flags, but alternatives where - // only one can be chosen. See the IsReadOnly etc convenience - // methods. - OpenReadOnly OpenFlags = syscall.O_RDONLY - OpenWriteOnly OpenFlags = syscall.O_WRONLY - OpenReadWrite OpenFlags = syscall.O_RDWR - - // File was opened in append-only mode, all writes will go to end - // of file. OS X does not provide this information. - OpenAppend OpenFlags = syscall.O_APPEND - OpenCreate OpenFlags = syscall.O_CREAT - OpenDirectory OpenFlags = syscall.O_DIRECTORY - OpenExclusive OpenFlags = syscall.O_EXCL - OpenNonblock OpenFlags = syscall.O_NONBLOCK - OpenSync OpenFlags = syscall.O_SYNC - OpenTruncate OpenFlags = syscall.O_TRUNC -) - -// OpenAccessModeMask is a bitmask that separates the access mode -// from the other flags in OpenFlags. -const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE - -// OpenFlags are the O_FOO flags passed to open/create/etc calls. For -// example, os.O_WRONLY | os.O_APPEND. -type OpenFlags uint32 - -func (fl OpenFlags) String() string { - // O_RDONLY, O_RWONLY, O_RDWR are not flags - s := accModeName(fl & OpenAccessModeMask) - flags := uint32(fl &^ OpenAccessModeMask) - if flags != 0 { - s = s + "+" + flagString(flags, openFlagNames) - } - return s -} - -// Return true if OpenReadOnly is set. 
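Because the access modes are alternatives rather than independent bits (OpenReadOnly is zero on most platforms), they cannot be detected with a bit test; the mask-and-compare that the convenience methods below wrap looks like this:

    package main

    import (
        "fmt"
        "syscall"

        "bazil.org/fuse"
    )

    func main() {
        fl := fuse.OpenFlags(syscall.O_RDWR | syscall.O_APPEND)

        // Strip everything but the access mode, then compare.
        mode := fl & fuse.OpenAccessModeMask
        fmt.Println(mode == fuse.OpenReadWrite) // true
        fmt.Println(fl.IsReadWrite())           // true: the same check, wrapped
    }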
-func (fl OpenFlags) IsReadOnly() bool { - return fl&OpenAccessModeMask == OpenReadOnly -} - -// Return true if OpenWriteOnly is set. -func (fl OpenFlags) IsWriteOnly() bool { - return fl&OpenAccessModeMask == OpenWriteOnly -} - -// Return true if OpenReadWrite is set. -func (fl OpenFlags) IsReadWrite() bool { - return fl&OpenAccessModeMask == OpenReadWrite -} - -func accModeName(flags OpenFlags) string { - switch flags { - case OpenReadOnly: - return "OpenReadOnly" - case OpenWriteOnly: - return "OpenWriteOnly" - case OpenReadWrite: - return "OpenReadWrite" - default: - return "" - } -} - -var openFlagNames = []flagName{ - {uint32(OpenAppend), "OpenAppend"}, - {uint32(OpenCreate), "OpenCreate"}, - {uint32(OpenDirectory), "OpenDirectory"}, - {uint32(OpenExclusive), "OpenExclusive"}, - {uint32(OpenNonblock), "OpenNonblock"}, - {uint32(OpenSync), "OpenSync"}, - {uint32(OpenTruncate), "OpenTruncate"}, -} - -// The OpenResponseFlags are returned in the OpenResponse. -type OpenResponseFlags uint32 - -const ( - OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file - OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open - OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X) - - OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X - OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X -) - -func (fl OpenResponseFlags) String() string { - return flagString(uint32(fl), openResponseFlagNames) -} - -var openResponseFlagNames = []flagName{ - {uint32(OpenDirectIO), "OpenDirectIO"}, - {uint32(OpenKeepCache), "OpenKeepCache"}, - {uint32(OpenNonSeekable), "OpenNonSeekable"}, - {uint32(OpenPurgeAttr), "OpenPurgeAttr"}, - {uint32(OpenPurgeUBC), "OpenPurgeUBC"}, -} - -// The InitFlags are used in the Init exchange. -type InitFlags uint32 - -const ( - InitAsyncRead InitFlags = 1 << 0 - InitPosixLocks InitFlags = 1 << 1 - InitFileOps InitFlags = 1 << 2 - InitAtomicTrunc InitFlags = 1 << 3 - InitExportSupport InitFlags = 1 << 4 - InitBigWrites InitFlags = 1 << 5 - // Do not mask file access modes with umask. Not supported on OS X. 
- InitDontMask InitFlags = 1 << 6 - InitSpliceWrite InitFlags = 1 << 7 - InitSpliceMove InitFlags = 1 << 8 - InitSpliceRead InitFlags = 1 << 9 - InitFlockLocks InitFlags = 1 << 10 - InitHasIoctlDir InitFlags = 1 << 11 - InitAutoInvalData InitFlags = 1 << 12 - InitDoReaddirplus InitFlags = 1 << 13 - InitReaddirplusAuto InitFlags = 1 << 14 - InitAsyncDIO InitFlags = 1 << 15 - InitWritebackCache InitFlags = 1 << 16 - InitNoOpenSupport InitFlags = 1 << 17 - - InitCaseSensitive InitFlags = 1 << 29 // OS X only - InitVolRename InitFlags = 1 << 30 // OS X only - InitXtimes InitFlags = 1 << 31 // OS X only -) - -type flagName struct { - bit uint32 - name string -} - -var initFlagNames = []flagName{ - {uint32(InitAsyncRead), "InitAsyncRead"}, - {uint32(InitPosixLocks), "InitPosixLocks"}, - {uint32(InitFileOps), "InitFileOps"}, - {uint32(InitAtomicTrunc), "InitAtomicTrunc"}, - {uint32(InitExportSupport), "InitExportSupport"}, - {uint32(InitBigWrites), "InitBigWrites"}, - {uint32(InitDontMask), "InitDontMask"}, - {uint32(InitSpliceWrite), "InitSpliceWrite"}, - {uint32(InitSpliceMove), "InitSpliceMove"}, - {uint32(InitSpliceRead), "InitSpliceRead"}, - {uint32(InitFlockLocks), "InitFlockLocks"}, - {uint32(InitHasIoctlDir), "InitHasIoctlDir"}, - {uint32(InitAutoInvalData), "InitAutoInvalData"}, - {uint32(InitDoReaddirplus), "InitDoReaddirplus"}, - {uint32(InitReaddirplusAuto), "InitReaddirplusAuto"}, - {uint32(InitAsyncDIO), "InitAsyncDIO"}, - {uint32(InitWritebackCache), "InitWritebackCache"}, - {uint32(InitNoOpenSupport), "InitNoOpenSupport"}, - - {uint32(InitCaseSensitive), "InitCaseSensitive"}, - {uint32(InitVolRename), "InitVolRename"}, - {uint32(InitXtimes), "InitXtimes"}, -} - -func (fl InitFlags) String() string { - return flagString(uint32(fl), initFlagNames) -} - -func flagString(f uint32, names []flagName) string { - var s string - - if f == 0 { - return "0" - } - - for _, n := range names { - if f&n.bit != 0 { - s += "+" + n.name - f &^= n.bit - } - } - if f != 0 { - s += fmt.Sprintf("%+#x", f) - } - return s[1:] -} - -// The ReleaseFlags are used in the Release exchange. -type ReleaseFlags uint32 - -const ( - ReleaseFlush ReleaseFlags = 1 << 0 -) - -func (fl ReleaseFlags) String() string { - return flagString(uint32(fl), releaseFlagNames) -} - -var releaseFlagNames = []flagName{ - {uint32(ReleaseFlush), "ReleaseFlush"}, -} - -// Opcodes -const ( - opLookup = 1 - opForget = 2 // no reply - opGetattr = 3 - opSetattr = 4 - opReadlink = 5 - opSymlink = 6 - opMknod = 8 - opMkdir = 9 - opUnlink = 10 - opRmdir = 11 - opRename = 12 - opLink = 13 - opOpen = 14 - opRead = 15 - opWrite = 16 - opStatfs = 17 - opRelease = 18 - opFsync = 20 - opSetxattr = 21 - opGetxattr = 22 - opListxattr = 23 - opRemovexattr = 24 - opFlush = 25 - opInit = 26 - opOpendir = 27 - opReaddir = 28 - opReleasedir = 29 - opFsyncdir = 30 - opGetlk = 31 - opSetlk = 32 - opSetlkw = 33 - opAccess = 34 - opCreate = 35 - opInterrupt = 36 - opBmap = 37 - opDestroy = 38 - opIoctl = 39 // Linux? - opPoll = 40 // Linux? 
- - // OS X - opSetvolname = 61 - opGetxtimes = 62 - opExchange = 63 -) - -type entryOut struct { - Nodeid uint64 // Inode ID - Generation uint64 // Inode generation - EntryValid uint64 // Cache timeout for the name - AttrValid uint64 // Cache timeout for the attributes - EntryValidNsec uint32 - AttrValidNsec uint32 - Attr attr -} - -func entryOutSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 9}): - return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize) - default: - return unsafe.Sizeof(entryOut{}) - } -} - -type forgetIn struct { - Nlookup uint64 -} - -type getattrIn struct { - GetattrFlags uint32 - _ uint32 - Fh uint64 -} - -type attrOut struct { - AttrValid uint64 // Cache timeout for the attributes - AttrValidNsec uint32 - _ uint32 - Attr attr -} - -func attrOutSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 9}): - return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize) - default: - return unsafe.Sizeof(attrOut{}) - } -} - -// OS X -type getxtimesOut struct { - Bkuptime uint64 - Crtime uint64 - BkuptimeNsec uint32 - CrtimeNsec uint32 -} - -type mknodIn struct { - Mode uint32 - Rdev uint32 - Umask uint32 - _ uint32 - // "filename\x00" follows. -} - -func mknodInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 12}): - return unsafe.Offsetof(mknodIn{}.Umask) - default: - return unsafe.Sizeof(mknodIn{}) - } -} - -type mkdirIn struct { - Mode uint32 - Umask uint32 - // filename follows -} - -func mkdirInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 12}): - return unsafe.Offsetof(mkdirIn{}.Umask) + 4 - default: - return unsafe.Sizeof(mkdirIn{}) - } -} - -type renameIn struct { - Newdir uint64 - // "oldname\x00newname\x00" follows -} - -// OS X -type exchangeIn struct { - Olddir uint64 - Newdir uint64 - Options uint64 - // "oldname\x00newname\x00" follows -} - -type linkIn struct { - Oldnodeid uint64 -} - -type setattrInCommon struct { - Valid uint32 - _ uint32 - Fh uint64 - Size uint64 - LockOwner uint64 // unused on OS X? - Atime uint64 - Mtime uint64 - Unused2 uint64 - AtimeNsec uint32 - MtimeNsec uint32 - Unused3 uint32 - Mode uint32 - Unused4 uint32 - Uid uint32 - Gid uint32 - Unused5 uint32 -} - -type openIn struct { - Flags uint32 - Unused uint32 -} - -type openOut struct { - Fh uint64 - OpenFlags uint32 - _ uint32 -} - -type createIn struct { - Flags uint32 - Mode uint32 - Umask uint32 - _ uint32 -} - -func createInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 12}): - return unsafe.Offsetof(createIn{}.Umask) - default: - return unsafe.Sizeof(createIn{}) - } -} - -type releaseIn struct { - Fh uint64 - Flags uint32 - ReleaseFlags uint32 - LockOwner uint32 -} - -type flushIn struct { - Fh uint64 - FlushFlags uint32 - _ uint32 - LockOwner uint64 -} - -type readIn struct { - Fh uint64 - Offset uint64 - Size uint32 - ReadFlags uint32 - LockOwner uint64 - Flags uint32 - _ uint32 -} - -func readInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 9}): - return unsafe.Offsetof(readIn{}.ReadFlags) + 4 - default: - return unsafe.Sizeof(readIn{}) - } -} - -// The ReadFlags are passed in ReadRequest. -type ReadFlags uint32 - -const ( - // LockOwner field is valid. 
- ReadLockOwner ReadFlags = 1 << 1 -) - -var readFlagNames = []flagName{ - {uint32(ReadLockOwner), "ReadLockOwner"}, -} - -func (fl ReadFlags) String() string { - return flagString(uint32(fl), readFlagNames) -} - -type writeIn struct { - Fh uint64 - Offset uint64 - Size uint32 - WriteFlags uint32 - LockOwner uint64 - Flags uint32 - _ uint32 -} - -func writeInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 9}): - return unsafe.Offsetof(writeIn{}.LockOwner) - default: - return unsafe.Sizeof(writeIn{}) - } -} - -type writeOut struct { - Size uint32 - _ uint32 -} - -// The WriteFlags are passed in WriteRequest. -type WriteFlags uint32 - -const ( - WriteCache WriteFlags = 1 << 0 - // LockOwner field is valid. - WriteLockOwner WriteFlags = 1 << 1 -) - -var writeFlagNames = []flagName{ - {uint32(WriteCache), "WriteCache"}, - {uint32(WriteLockOwner), "WriteLockOwner"}, -} - -func (fl WriteFlags) String() string { - return flagString(uint32(fl), writeFlagNames) -} - -const compatStatfsSize = 48 - -type statfsOut struct { - St kstatfs -} - -type fsyncIn struct { - Fh uint64 - FsyncFlags uint32 - _ uint32 -} - -type setxattrInCommon struct { - Size uint32 - Flags uint32 -} - -func (setxattrInCommon) position() uint32 { - return 0 -} - -type getxattrInCommon struct { - Size uint32 - _ uint32 -} - -func (getxattrInCommon) position() uint32 { - return 0 -} - -type getxattrOut struct { - Size uint32 - _ uint32 -} - -type lkIn struct { - Fh uint64 - Owner uint64 - Lk fileLock - LkFlags uint32 - _ uint32 -} - -func lkInSize(p Protocol) uintptr { - switch { - case p.LT(Protocol{7, 9}): - return unsafe.Offsetof(lkIn{}.LkFlags) - default: - return unsafe.Sizeof(lkIn{}) - } -} - -type lkOut struct { - Lk fileLock -} - -type accessIn struct { - Mask uint32 - _ uint32 -} - -type initIn struct { - Major uint32 - Minor uint32 - MaxReadahead uint32 - Flags uint32 -} - -const initInSize = int(unsafe.Sizeof(initIn{})) - -type initOut struct { - Major uint32 - Minor uint32 - MaxReadahead uint32 - Flags uint32 - Unused uint32 - MaxWrite uint32 -} - -type interruptIn struct { - Unique uint64 -} - -type bmapIn struct { - Block uint64 - BlockSize uint32 - _ uint32 -} - -type bmapOut struct { - Block uint64 -} - -type inHeader struct { - Len uint32 - Opcode uint32 - Unique uint64 - Nodeid uint64 - Uid uint32 - Gid uint32 - Pid uint32 - _ uint32 -} - -const inHeaderSize = int(unsafe.Sizeof(inHeader{})) - -type outHeader struct { - Len uint32 - Error int32 - Unique uint64 -} - -type dirent struct { - Ino uint64 - Off uint64 - Namelen uint32 - Type uint32 - Name [0]byte -} - -const direntSize = 8 + 8 + 4 + 4 - -const ( - notifyCodePoll int32 = 1 - notifyCodeInvalInode int32 = 2 - notifyCodeInvalEntry int32 = 3 -) - -type notifyInvalInodeOut struct { - Ino uint64 - Off int64 - Len int64 -} - -type notifyInvalEntryOut struct { - Parent uint64 - Namelen uint32 - _ uint32 -} diff --git a/vendor/bazil.org/fuse/fuse_kernel_darwin.go b/vendor/bazil.org/fuse/fuse_kernel_darwin.go deleted file mode 100644 index b9873fdf3..000000000 --- a/vendor/bazil.org/fuse/fuse_kernel_darwin.go +++ /dev/null @@ -1,88 +0,0 @@ -package fuse - -import ( - "time" -) - -type attr struct { - Ino uint64 - Size uint64 - Blocks uint64 - Atime uint64 - Mtime uint64 - Ctime uint64 - Crtime_ uint64 // OS X only - AtimeNsec uint32 - MtimeNsec uint32 - CtimeNsec uint32 - CrtimeNsec uint32 // OS X only - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint32 - Flags_ uint32 // OS X only; see chflags(2) - Blksize uint32 - padding uint32 
-} - -func (a *attr) SetCrtime(s uint64, ns uint32) { - a.Crtime_, a.CrtimeNsec = s, ns -} - -func (a *attr) SetFlags(f uint32) { - a.Flags_ = f -} - -type setattrIn struct { - setattrInCommon - - // OS X only - Bkuptime_ uint64 - Chgtime_ uint64 - Crtime uint64 - BkuptimeNsec uint32 - ChgtimeNsec uint32 - CrtimeNsec uint32 - Flags_ uint32 // see chflags(2) -} - -func (in *setattrIn) BkupTime() time.Time { - return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec)) -} - -func (in *setattrIn) Chgtime() time.Time { - return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec)) -} - -func (in *setattrIn) Flags() uint32 { - return in.Flags_ -} - -func openFlags(flags uint32) OpenFlags { - return OpenFlags(flags) -} - -type getxattrIn struct { - getxattrInCommon - - // OS X only - Position uint32 - Padding uint32 -} - -func (g *getxattrIn) position() uint32 { - return g.Position -} - -type setxattrIn struct { - setxattrInCommon - - // OS X only - Position uint32 - Padding uint32 -} - -func (s *setxattrIn) position() uint32 { - return s.Position -} diff --git a/vendor/bazil.org/fuse/fuse_kernel_freebsd.go b/vendor/bazil.org/fuse/fuse_kernel_freebsd.go deleted file mode 100644 index b1141e41d..000000000 --- a/vendor/bazil.org/fuse/fuse_kernel_freebsd.go +++ /dev/null @@ -1,62 +0,0 @@ -package fuse - -import "time" - -type attr struct { - Ino uint64 - Size uint64 - Blocks uint64 - Atime uint64 - Mtime uint64 - Ctime uint64 - AtimeNsec uint32 - MtimeNsec uint32 - CtimeNsec uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint32 - Blksize uint32 - padding uint32 -} - -func (a *attr) Crtime() time.Time { - return time.Time{} -} - -func (a *attr) SetCrtime(s uint64, ns uint32) { - // ignored on freebsd -} - -func (a *attr) SetFlags(f uint32) { - // ignored on freebsd -} - -type setattrIn struct { - setattrInCommon -} - -func (in *setattrIn) BkupTime() time.Time { - return time.Time{} -} - -func (in *setattrIn) Chgtime() time.Time { - return time.Time{} -} - -func (in *setattrIn) Flags() uint32 { - return 0 -} - -func openFlags(flags uint32) OpenFlags { - return OpenFlags(flags) -} - -type getxattrIn struct { - getxattrInCommon -} - -type setxattrIn struct { - setxattrInCommon -} diff --git a/vendor/bazil.org/fuse/fuse_kernel_linux.go b/vendor/bazil.org/fuse/fuse_kernel_linux.go deleted file mode 100644 index d3ba86617..000000000 --- a/vendor/bazil.org/fuse/fuse_kernel_linux.go +++ /dev/null @@ -1,70 +0,0 @@ -package fuse - -import "time" - -type attr struct { - Ino uint64 - Size uint64 - Blocks uint64 - Atime uint64 - Mtime uint64 - Ctime uint64 - AtimeNsec uint32 - MtimeNsec uint32 - CtimeNsec uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint32 - Blksize uint32 - padding uint32 -} - -func (a *attr) Crtime() time.Time { - return time.Time{} -} - -func (a *attr) SetCrtime(s uint64, ns uint32) { - // Ignored on Linux. -} - -func (a *attr) SetFlags(f uint32) { - // Ignored on Linux. 
-} - -type setattrIn struct { - setattrInCommon -} - -func (in *setattrIn) BkupTime() time.Time { - return time.Time{} -} - -func (in *setattrIn) Chgtime() time.Time { - return time.Time{} -} - -func (in *setattrIn) Flags() uint32 { - return 0 -} - -func openFlags(flags uint32) OpenFlags { - // on amd64, the 32-bit O_LARGEFILE flag is always seen; - // on i386, the flag probably depends on the app - // requesting, but in any case should be utterly - // uninteresting to us here; our kernel protocol messages - // are not directly related to the client app's kernel - // API/ABI - flags &^= 0x8000 - - return OpenFlags(flags) -} - -type getxattrIn struct { - getxattrInCommon -} - -type setxattrIn struct { - setxattrInCommon -} diff --git a/vendor/bazil.org/fuse/fuse_kernel_std.go b/vendor/bazil.org/fuse/fuse_kernel_std.go deleted file mode 100644 index 074cfd322..000000000 --- a/vendor/bazil.org/fuse/fuse_kernel_std.go +++ /dev/null @@ -1 +0,0 @@ -package fuse diff --git a/vendor/bazil.org/fuse/fuse_linux.go b/vendor/bazil.org/fuse/fuse_linux.go deleted file mode 100644 index 5fb96f9ae..000000000 --- a/vendor/bazil.org/fuse/fuse_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package fuse - -// Maximum file write size we are prepared to receive from the kernel. -// -// Linux 4.2.0 has been observed to cap this value at 128kB -// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages). -const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuseutil/fuseutil.go b/vendor/bazil.org/fuse/fuseutil/fuseutil.go deleted file mode 100644 index b3f52b73b..000000000 --- a/vendor/bazil.org/fuse/fuseutil/fuseutil.go +++ /dev/null @@ -1,20 +0,0 @@ -package fuseutil // import "bazil.org/fuse/fuseutil" - -import ( - "bazil.org/fuse" -) - -// HandleRead handles a read request assuming that data is the entire file content. -// It adjusts the amount returned in resp according to req.Offset and req.Size. -func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) { - if req.Offset >= int64(len(data)) { - data = nil - } else { - data = data[req.Offset:] - } - if len(data) > req.Size { - data = data[:req.Size] - } - n := copy(resp.Data[:req.Size], data) - resp.Data = resp.Data[:n] -} diff --git a/vendor/bazil.org/fuse/mount.go b/vendor/bazil.org/fuse/mount.go deleted file mode 100644 index 8054e9021..000000000 --- a/vendor/bazil.org/fuse/mount.go +++ /dev/null @@ -1,38 +0,0 @@ -package fuse - -import ( - "bufio" - "errors" - "io" - "log" - "sync" -) - -var ( - // ErrOSXFUSENotFound is returned from Mount when the OSXFUSE - // installation is not detected. - // - // Only happens on OS X. Make sure OSXFUSE is installed, or see - // OSXFUSELocations for customization. 
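HandleRead above packages the offset/size windowing that was written out by hand in the dirent example earlier. A sketch of a file read handler built on it; the content parameter stands in for whatever bytes the filesystem already holds in memory:

    package fssketch

    import (
        "bazil.org/fuse"
        "bazil.org/fuse/fuseutil"
    )

    // serveRead answers one ReadRequest for a fully in-memory file.
    // The response buffer needs capacity req.Size, because HandleRead
    // reslices it up to that length before copying.
    func serveRead(req *fuse.ReadRequest, content []byte) {
        resp := &fuse.ReadResponse{Data: make([]byte, 0, req.Size)}
        fuseutil.HandleRead(req, resp, content)
        req.Respond(resp)
    }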
- ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE") -) - -func neverIgnoreLine(line string) bool { - return false -} - -func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) { - defer wg.Done() - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - if ignore(line) { - continue - } - log.Printf("%s: %s", prefix, line) - } - if err := scanner.Err(); err != nil { - log.Printf("%s, error reading: %v", prefix, err) - } -} diff --git a/vendor/bazil.org/fuse/mount_darwin.go b/vendor/bazil.org/fuse/mount_darwin.go deleted file mode 100644 index c1c36e62b..000000000 --- a/vendor/bazil.org/fuse/mount_darwin.go +++ /dev/null @@ -1,208 +0,0 @@ -package fuse - -import ( - "errors" - "fmt" - "log" - "os" - "os/exec" - "path" - "strconv" - "strings" - "sync" - "syscall" -) - -var ( - errNoAvail = errors.New("no available fuse devices") - errNotLoaded = errors.New("osxfuse is not loaded") -) - -func loadOSXFUSE(bin string) error { - cmd := exec.Command(bin) - cmd.Dir = "/" - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func openOSXFUSEDev(devPrefix string) (*os.File, error) { - var f *os.File - var err error - for i := uint64(0); ; i++ { - path := devPrefix + strconv.FormatUint(i, 10) - f, err = os.OpenFile(path, os.O_RDWR, 0000) - if os.IsNotExist(err) { - if i == 0 { - // not even the first device was found -> fuse is not loaded - return nil, errNotLoaded - } - - // we've run out of kernel-provided devices - return nil, errNoAvail - } - - if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY { - // try the next one - continue - } - - if err != nil { - return nil, err - } - return f, nil - } -} - -func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) { - var noMountpointPrefix = helperName + `: ` - const noMountpointSuffix = `: No such file or directory` - return func(line string) (ignore bool) { - if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { - // re-extract it from the error message in case some layer - // changed the path - mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] - err := &MountpointDoesNotExistError{ - Path: mountpoint, - } - select { - case errCh <- err: - return true - default: - // not the first error; fall back to logging it - return false - } - } - - return false - } -} - -// isBoringMountOSXFUSEError returns whether the Wait error is -// uninteresting; exit status 64 is. -func isBoringMountOSXFUSEError(err error) bool { - if err, ok := err.(*exec.ExitError); ok && err.Exited() { - if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 { - return true - } - } - return false -} - -func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error { - for k, v := range conf.options { - if strings.Contains(k, ",") || strings.Contains(v, ",") { - // Silly limitation but the mount helper does not - // understand any escaping. See TestMountOptionCommaError. - return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v) - } - } - cmd := exec.Command( - bin, - "-o", conf.getOptions(), - // Tell osxfuse-kext how large our buffer is. It must split - // writes larger than this into multiple writes. - // - // OSXFUSE seems to ignore InitResponse.MaxWrite, and uses - // this instead. 
-		"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
-		// refers to fd passed in cmd.ExtraFiles
-		"3",
-		dir,
-	)
-	cmd.ExtraFiles = []*os.File{f}
-	cmd.Env = os.Environ()
-	// OSXFUSE <3.3.0
-	cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
-	// OSXFUSE >=3.3.0
-	cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")
-
-	daemon := os.Args[0]
-	if daemonVar != "" {
-		cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
-	}
-
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return fmt.Errorf("setting up mount_osxfusefs stdout: %v", err)
-	}
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
-	}
-
-	if err := cmd.Start(); err != nil {
-		return fmt.Errorf("mount_osxfusefs: %v", err)
-	}
-	helperErrCh := make(chan error, 1)
-	go func() {
-		var wg sync.WaitGroup
-		wg.Add(2)
-		go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
-		helperName := path.Base(bin)
-		go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
-		wg.Wait()
-		if err := cmd.Wait(); err != nil {
-			// see if we have a better error to report
-			select {
-			case helperErr := <-helperErrCh:
-				// log the Wait error if it's not what we expected
-				if !isBoringMountOSXFUSEError(err) {
-					log.Printf("mount helper failed: %v", err)
-				}
-				// and now return what we grabbed from stderr as the real
-				// error
-				*errp = helperErr
-				close(ready)
-				return
-			default:
-				// nope, fall back to generic message
-			}
-
-			*errp = fmt.Errorf("mount_osxfusefs: %v", err)
-			close(ready)
-			return
-		}
-
-		*errp = nil
-		close(ready)
-	}()
-	return nil
-}
-
-func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
-	locations := conf.osxfuseLocations
-	if locations == nil {
-		locations = []OSXFUSEPaths{
-			OSXFUSELocationV3,
-			OSXFUSELocationV2,
-		}
-	}
-	for _, loc := range locations {
-		if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
-			// try the other locations
-			continue
-		}
-
-		f, err := openOSXFUSEDev(loc.DevicePrefix)
-		if err == errNotLoaded {
-			err = loadOSXFUSE(loc.Load)
-			if err != nil {
-				return nil, err
-			}
-			// try again
-			f, err = openOSXFUSEDev(loc.DevicePrefix)
-		}
-		if err != nil {
-			return nil, err
-		}
-		err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
-		if err != nil {
-			f.Close()
-			return nil, err
-		}
-		return f, nil
-	}
-	return nil, ErrOSXFUSENotFound
-}
diff --git a/vendor/bazil.org/fuse/mount_freebsd.go b/vendor/bazil.org/fuse/mount_freebsd.go
deleted file mode 100644
index 70bb41024..000000000
--- a/vendor/bazil.org/fuse/mount_freebsd.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package fuse
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"os/exec"
-	"strings"
-	"sync"
-	"syscall"
-)
-
-func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) {
-	return func(line string) (ignore bool) {
-		const (
-			noMountpointPrefix = `mount_fusefs: `
-			noMountpointSuffix = `: No such file or directory`
-		)
-		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
-			// re-extract it from the error message in case some layer
-			// changed the path
-			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
-			err := &MountpointDoesNotExistError{
-				Path: mountpoint,
-			}
-			select {
-			case errCh <- err:
-				return true
-			default:
-				// not the first error; fall back to logging it
-				return false
-			}
-		}
-
-		return false
-	}
-}
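[Editor's sketch, not part of the patch: the helper above turns a specific mount_fusefs stderr line into the typed MountpointDoesNotExistError. A caller of the public bazil.org/fuse API might detect that condition as below; the mountpoint path is illustrative and error handling is reduced to log.Fatal for brevity.]

    package main

    import (
    	"log"

    	"bazil.org/fuse"
    )

    func main() {
    	// Mount, distinguishing a missing mountpoint from other failures.
    	c, err := fuse.Mount("/mnt/example", fuse.ReadOnly())
    	if err != nil {
    		if mpErr, ok := err.(*fuse.MountpointDoesNotExistError); ok {
    			log.Fatalf("mountpoint %q does not exist", mpErr.Path)
    		}
    		log.Fatal(err)
    	}
    	defer c.Close()
    	// ... serve the filesystem here ...
    }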
-
-// isBoringMountFusefsError returns whether the Wait error is
-// uninteresting; exit status 1 is.
-func isBoringMountFusefsError(err error) bool {
-	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
-		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
-			return true
-		}
-	}
-	return false
-}
-
-func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
-	for k, v := range conf.options {
-		if strings.Contains(k, ",") || strings.Contains(v, ",") {
-			// Silly limitation but the mount helper does not
-			// understand any escaping. See TestMountOptionCommaError.
-			return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v)
-		}
-	}
-
-	f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
-	if err != nil {
-		*errp = err
-		return nil, err
-	}
-
-	cmd := exec.Command(
-		"/sbin/mount_fusefs",
-		"--safe",
-		"-o", conf.getOptions(),
-		"3",
-		dir,
-	)
-	cmd.ExtraFiles = []*os.File{f}
-
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return nil, fmt.Errorf("setting up mount_fusefs stdout: %v", err)
-	}
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
-	}
-
-	if err := cmd.Start(); err != nil {
-		return nil, fmt.Errorf("mount_fusefs: %v", err)
-	}
-	helperErrCh := make(chan error, 1)
-	var wg sync.WaitGroup
-	wg.Add(2)
-	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
-	go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr)
-	wg.Wait()
-	if err := cmd.Wait(); err != nil {
-		// see if we have a better error to report
-		select {
-		case helperErr := <-helperErrCh:
-			// log the Wait error if it's not what we expected
-			if !isBoringMountFusefsError(err) {
-				log.Printf("mount helper failed: %v", err)
-			}
-			// and now return what we grabbed from stderr as the real
-			// error
-			return nil, helperErr
-		default:
-			// nope, fall back to generic message
-		}
-		return nil, fmt.Errorf("mount_fusefs: %v", err)
-	}
-
-	close(ready)
-	return f, nil
-}
diff --git a/vendor/bazil.org/fuse/mount_linux.go b/vendor/bazil.org/fuse/mount_linux.go
deleted file mode 100644
index 197d1044e..000000000
--- a/vendor/bazil.org/fuse/mount_linux.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package fuse
-
-import (
-	"fmt"
-	"log"
-	"net"
-	"os"
-	"os/exec"
-	"strings"
-	"sync"
-	"syscall"
-)
-
-func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
-	return func(line string) (ignore bool) {
-		if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
-			// Silence this particular message, it occurs way too
-			// commonly and isn't very relevant to whether the mount
-			// succeeds or not.
-			return true
-		}
-
-		const (
-			noMountpointPrefix = `fusermount: failed to access mountpoint `
-			noMountpointSuffix = `: No such file or directory`
-		)
-		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
-			// re-extract it from the error message in case some layer
-			// changed the path
-			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
-			err := &MountpointDoesNotExistError{
-				Path: mountpoint,
-			}
-			select {
-			case errCh <- err:
-				return true
-			default:
-				// not the first error; fall back to logging it
-				return false
-			}
-		}
-
-		return false
-	}
-}
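[Editor's sketch, not part of the patch: the stderr handlers in these files all rely on the same first-error-wins idiom, shown in isolation below. A buffered channel of capacity one plus a non-blocking send keeps only the first interesting line as the error to surface; later lines fall through to plain logging.]

    // firstErrorWins demonstrates the pattern used by the stderr handlers
    // in this package: the first reported error is captured for the caller,
    // subsequent ones are rejected so the caller logs them instead.
    func firstErrorWins() (report func(error) bool, first <-chan error) {
    	errCh := make(chan error, 1)
    	return func(err error) bool {
    		select {
    		case errCh <- err:
    			return true // captured; surfaced to the caller later
    		default:
    			return false // channel already holds one; just log this line
    		}
    	}, errCh
    }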
-
-// isBoringFusermountError returns whether the Wait error is
-// uninteresting; exit status 1 is.
-func isBoringFusermountError(err error) bool {
-	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
-		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
-			return true
-		}
-	}
-	return false
-}
-
-func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
-	// linux mount is never delayed
-	close(ready)
-
-	fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
-	if err != nil {
-		return nil, fmt.Errorf("socketpair error: %v", err)
-	}
-
-	writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
-	defer writeFile.Close()
-
-	readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
-	defer readFile.Close()
-
-	cmd := exec.Command(
-		"fusermount",
-		"-o", conf.getOptions(),
-		"--",
-		dir,
-	)
-	cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")
-
-	cmd.ExtraFiles = []*os.File{writeFile}
-
-	var wg sync.WaitGroup
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return nil, fmt.Errorf("setting up fusermount stdout: %v", err)
-	}
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
-	}
-
-	if err := cmd.Start(); err != nil {
-		return nil, fmt.Errorf("fusermount: %v", err)
-	}
-	helperErrCh := make(chan error, 1)
-	wg.Add(2)
-	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
-	go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
-	wg.Wait()
-	if err := cmd.Wait(); err != nil {
-		// see if we have a better error to report
-		select {
-		case helperErr := <-helperErrCh:
-			// log the Wait error if it's not what we expected
-			if !isBoringFusermountError(err) {
-				log.Printf("mount helper failed: %v", err)
-			}
-			// and now return what we grabbed from stderr as the real
-			// error
-			return nil, helperErr
-		default:
-			// nope, fall back to generic message
-		}
-
-		return nil, fmt.Errorf("fusermount: %v", err)
-	}
-
-	c, err := net.FileConn(readFile)
-	if err != nil {
-		return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
-	}
-	defer c.Close()
-
-	uc, ok := c.(*net.UnixConn)
-	if !ok {
-		return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
-	}
-
-	buf := make([]byte, 32) // expect 1 byte
-	oob := make([]byte, 32) // expect 24 bytes
-	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
-	if err != nil {
-		return nil, fmt.Errorf("reading from fusermount socket: %v", err)
-	}
-	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
-	if err != nil {
-		return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
-	}
-	if len(scms) != 1 {
-		return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
-	}
-	scm := scms[0]
-	gotFds, err := syscall.ParseUnixRights(&scm)
-	if err != nil {
-		return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
-	}
-	if len(gotFds) != 1 {
-		return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
-	}
-	f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
-	return f, nil
-}
diff --git a/vendor/bazil.org/fuse/options.go b/vendor/bazil.org/fuse/options.go
deleted file mode 100644
index 65ce8a541..000000000
--- a/vendor/bazil.org/fuse/options.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package fuse
-
-import (
-	"errors"
-	"strings"
-)
-
-func dummyOption(conf *mountConfig) error {
-	return nil
-}
-
-// mountConfig holds the configuration for a mount operation.
-// Use it by passing MountOption values to Mount.
-type mountConfig struct {
-	options          map[string]string
-	maxReadahead     uint32
-	initFlags        InitFlags
-	osxfuseLocations []OSXFUSEPaths
-}
-
-func escapeComma(s string) string {
-	s = strings.Replace(s, `\`, `\\`, -1)
-	s = strings.Replace(s, `,`, `\,`, -1)
-	return s
-}
-
-// getOptions makes a string of options suitable for passing to FUSE
-// mount flag `-o`. Returns an empty string if no options were set.
-// Any platform-specific adjustments should happen before the call.
-func (m *mountConfig) getOptions() string {
-	var opts []string
-	for k, v := range m.options {
-		k = escapeComma(k)
-		if v != "" {
-			k += "=" + escapeComma(v)
-		}
-		opts = append(opts, k)
-	}
-	return strings.Join(opts, ",")
-}
-
-type mountOption func(*mountConfig) error
-
-// MountOption is passed to Mount to change the behavior of the mount.
-type MountOption mountOption
-
-// FSName sets the file system name (also called source) that is
-// visible in the list of mounted file systems.
-//
-// FreeBSD ignores this option.
-func FSName(name string) MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["fsname"] = name
-		return nil
-	}
-}
-
-// Subtype sets the subtype of the mount. The main type is always
-// `fuse`. The type in a list of mounted file systems will look like
-// `fuse.foo`.
-//
-// OS X ignores this option.
-// FreeBSD ignores this option.
-func Subtype(fstype string) MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["subtype"] = fstype
-		return nil
-	}
-}
-
-// LocalVolume sets the volume to be local (instead of network),
-// changing the behavior of Finder, Spotlight, and such.
-//
-// OS X only. Others ignore this option.
-func LocalVolume() MountOption {
-	return localVolume
-}
-
-// VolumeName sets the volume name shown in Finder.
-//
-// OS X only. Others ignore this option.
-func VolumeName(name string) MountOption {
-	return volumeName(name)
-}
-
-// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
-// to store extended attributes on file systems that do not support
-// them natively.
-//
-// Such file names are:
-//
-//     ._*
-//     .DS_Store
-//
-// OS X only. Others ignore this option.
-func NoAppleDouble() MountOption {
-	return noAppleDouble
-}
-
-// NoAppleXattr makes OSXFUSE disallow extended attributes with the
-// prefix "com.apple.". This disables persistent Finder state and
-// other such information.
-//
-// OS X only. Others ignore this option.
-func NoAppleXattr() MountOption {
-	return noAppleXattr
-}
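[Editor's sketch, not part of the patch: what the escapeComma/getOptions pair above produces for an illustrative option set, written as an in-package snippet (mountConfig is unexported; an fmt import is assumed). Map iteration order varies, so the option order in the output is not stable.]

    func demoGetOptions() {
    	conf := &mountConfig{options: map[string]string{
    		"fsname":  `backup,main`, // the comma is escaped to \, by escapeComma
    		"subtype": "examplefs",
    		"ro":      "", // value-less options render as a bare key
    	}}
    	// Prints something like: fsname=backup\,main,ro,subtype=examplefs
    	// (map iteration order varies).
    	fmt.Println(conf.getOptions())
    }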
-
-// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
-// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
-//
-// OSXFUSE expects all create calls to return EEXIST in case the file
-// already exists, regardless of whether O_EXCL was specified or not.
-// To ensure this behavior, it normally sets OpenExclusive for all
-// Create calls, regardless of whether the original call had it set.
-// For distributed filesystems, that may force every file create to be
-// a distributed consensus action, causing undesirable delays.
-//
-// This option makes the FUSE filesystem see the original flag value,
-// and better decide when to ensure global consensus.
-//
-// Note that returning EEXIST on existing file create is still
-// expected with OSXFUSE, regardless of the presence of the
-// OpenExclusive flag.
-//
-// For more information, see
-// https://github.com/osxfuse/osxfuse/issues/209
-//
-// OS X only. Others ignore this option.
-// Requires OSXFUSE 3.4.1 or newer.
-func ExclCreate() MountOption {
-	return exclCreate
-}
-
-// DaemonTimeout sets the time in seconds between a request and a reply before
-// the FUSE mount is declared dead.
-//
-// OS X and FreeBSD only. Others ignore this option.
-func DaemonTimeout(name string) MountOption {
-	return daemonTimeout(name)
-}
-
-var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot")
-
-// AllowOther allows other users to access the file system.
-//
-// Only one of AllowOther or AllowRoot can be used.
-func AllowOther() MountOption {
-	return func(conf *mountConfig) error {
-		if _, ok := conf.options["allow_root"]; ok {
-			return ErrCannotCombineAllowOtherAndAllowRoot
-		}
-		conf.options["allow_other"] = ""
-		return nil
-	}
-}
-
-// AllowRoot allows the root user (in addition to the user who mounted
-// the file system) to access the file system.
-//
-// Only one of AllowOther or AllowRoot can be used.
-//
-// FreeBSD ignores this option.
-func AllowRoot() MountOption {
-	return func(conf *mountConfig) error {
-		if _, ok := conf.options["allow_other"]; ok {
-			return ErrCannotCombineAllowOtherAndAllowRoot
-		}
-		conf.options["allow_root"] = ""
-		return nil
-	}
-}
-
-// AllowDev enables interpreting character or block special devices on the
-// filesystem.
-func AllowDev() MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["dev"] = ""
-		return nil
-	}
-}
-
-// AllowSUID allows set-user-identifier or set-group-identifier bits to take
-// effect.
-func AllowSUID() MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["suid"] = ""
-		return nil
-	}
-}
-
-// DefaultPermissions makes the kernel enforce access control based on
-// the file mode (as in chmod).
-//
-// Without this option, the Node itself decides what is and is not
-// allowed. This is normally ok because FUSE file systems cannot be
-// accessed by other users without AllowOther/AllowRoot.
-//
-// FreeBSD ignores this option.
-func DefaultPermissions() MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["default_permissions"] = ""
-		return nil
-	}
-}
-
-// ReadOnly makes the mount read-only.
-func ReadOnly() MountOption {
-	return func(conf *mountConfig) error {
-		conf.options["ro"] = ""
-		return nil
-	}
-}
-
-// MaxReadahead sets the number of bytes that can be prefetched for
-// sequential reads. The kernel can enforce a maximum value lower than
-// this.
-//
-// This setting makes the kernel perform speculative reads that do not
-// originate from any client process. This usually tremendously
-// improves read performance.
-func MaxReadahead(n uint32) MountOption {
-	return func(conf *mountConfig) error {
-		conf.maxReadahead = n
-		return nil
-	}
-}
-
-// AsyncRead enables multiple outstanding read requests for the same
-// handle. Without this, there is at most one request in flight at a
-// time.
-func AsyncRead() MountOption {
-	return func(conf *mountConfig) error {
-		conf.initFlags |= InitAsyncRead
-		return nil
-	}
-}
-
-// WritebackCache enables the kernel to buffer writes before sending
-// them to the FUSE server. Without this, writethrough caching is
-// used.
-func WritebackCache() MountOption {
-	return func(conf *mountConfig) error {
-		conf.initFlags |= InitWritebackCache
-		return nil
-	}
-}
-
-// OSXFUSEPaths describes the paths used by an installed OSXFUSE
-// version. See OSXFUSELocationV3 for typical values.
-type OSXFUSEPaths struct {
-	// Prefix for the device file. At mount time, an incrementing
-	// number is suffixed until a free FUSE device is found.
-	DevicePrefix string
-	// Path of the load helper, used to load the kernel extension if
-	// no device files are found.
-	Load string
-	// Path of the mount helper, used for the actual mount operation.
-	Mount string
-	// Environment variable used to pass the path to the executable
-	// calling the mount helper.
-	DaemonVar string
-}
-
-// Default paths for OSXFUSE. See OSXFUSELocations.
-var (
-	OSXFUSELocationV3 = OSXFUSEPaths{
-		DevicePrefix: "/dev/osxfuse",
-		Load:         "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
-		Mount:        "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
-		DaemonVar:    "MOUNT_OSXFUSE_DAEMON_PATH",
-	}
-	OSXFUSELocationV2 = OSXFUSEPaths{
-		DevicePrefix: "/dev/osxfuse",
-		Load:         "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
-		Mount:        "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
-		DaemonVar:    "MOUNT_FUSEFS_DAEMON_PATH",
-	}
-)
-
-// OSXFUSELocations sets where to look for OSXFUSE files. The
-// arguments are all the possible locations. The previous locations
-// are replaced.
-//
-// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
-// used.
-//
-// OS X only. Others ignore this option.
-func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
-	return func(conf *mountConfig) error {
-		if len(paths) == 0 {
-			return errors.New("must specify at least one location for OSXFUSELocations")
-		}
-		// replace previous values, but make a copy so there are no
-		// worries about the caller mutating their slice
-		conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
-		return nil
-	}
-}
-
-// AllowNonEmptyMount allows mounting over a non-empty directory.
-//
-// The files in it will be shadowed by the freshly created mount. By
-// default these mounts are rejected to prevent accidental covering up
-// of data, which could for example prevent automatic backup.
-func AllowNonEmptyMount() MountOption { - return func(conf *mountConfig) error { - conf.options["nonempty"] = "" - return nil - } -} diff --git a/vendor/bazil.org/fuse/options_darwin.go b/vendor/bazil.org/fuse/options_darwin.go deleted file mode 100644 index faa9d78e7..000000000 --- a/vendor/bazil.org/fuse/options_darwin.go +++ /dev/null @@ -1,35 +0,0 @@ -package fuse - -func localVolume(conf *mountConfig) error { - conf.options["local"] = "" - return nil -} - -func volumeName(name string) MountOption { - return func(conf *mountConfig) error { - conf.options["volname"] = name - return nil - } -} - -func daemonTimeout(name string) MountOption { - return func(conf *mountConfig) error { - conf.options["daemon_timeout"] = name - return nil - } -} - -func noAppleXattr(conf *mountConfig) error { - conf.options["noapplexattr"] = "" - return nil -} - -func noAppleDouble(conf *mountConfig) error { - conf.options["noappledouble"] = "" - return nil -} - -func exclCreate(conf *mountConfig) error { - conf.options["excl_create"] = "" - return nil -} diff --git a/vendor/bazil.org/fuse/options_freebsd.go b/vendor/bazil.org/fuse/options_freebsd.go deleted file mode 100644 index 7c164b136..000000000 --- a/vendor/bazil.org/fuse/options_freebsd.go +++ /dev/null @@ -1,28 +0,0 @@ -package fuse - -func localVolume(conf *mountConfig) error { - return nil -} - -func volumeName(name string) MountOption { - return dummyOption -} - -func daemonTimeout(name string) MountOption { - return func(conf *mountConfig) error { - conf.options["timeout"] = name - return nil - } -} - -func noAppleXattr(conf *mountConfig) error { - return nil -} - -func noAppleDouble(conf *mountConfig) error { - return nil -} - -func exclCreate(conf *mountConfig) error { - return nil -} diff --git a/vendor/bazil.org/fuse/options_linux.go b/vendor/bazil.org/fuse/options_linux.go deleted file mode 100644 index 13f0896d5..000000000 --- a/vendor/bazil.org/fuse/options_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package fuse - -func localVolume(conf *mountConfig) error { - return nil -} - -func volumeName(name string) MountOption { - return dummyOption -} - -func daemonTimeout(name string) MountOption { - return dummyOption -} - -func noAppleXattr(conf *mountConfig) error { - return nil -} - -func noAppleDouble(conf *mountConfig) error { - return nil -} - -func exclCreate(conf *mountConfig) error { - return nil -} diff --git a/vendor/bazil.org/fuse/protocol.go b/vendor/bazil.org/fuse/protocol.go deleted file mode 100644 index a77bbf72f..000000000 --- a/vendor/bazil.org/fuse/protocol.go +++ /dev/null @@ -1,75 +0,0 @@ -package fuse - -import ( - "fmt" -) - -// Protocol is a FUSE protocol version number. -type Protocol struct { - Major uint32 - Minor uint32 -} - -func (p Protocol) String() string { - return fmt.Sprintf("%d.%d", p.Major, p.Minor) -} - -// LT returns whether a is less than b. -func (a Protocol) LT(b Protocol) bool { - return a.Major < b.Major || - (a.Major == b.Major && a.Minor < b.Minor) -} - -// GE returns whether a is greater than or equal to b. -func (a Protocol) GE(b Protocol) bool { - return a.Major > b.Major || - (a.Major == b.Major && a.Minor >= b.Minor) -} - -func (a Protocol) is79() bool { - return a.GE(Protocol{7, 9}) -} - -// HasAttrBlockSize returns whether Attr.BlockSize is respected by the -// kernel. -func (a Protocol) HasAttrBlockSize() bool { - return a.is79() -} - -// HasReadWriteFlags returns whether ReadRequest/WriteRequest -// fields Flags and FileFlags are valid. 
-func (a Protocol) HasReadWriteFlags() bool { - return a.is79() -} - -// HasGetattrFlags returns whether GetattrRequest field Flags is -// valid. -func (a Protocol) HasGetattrFlags() bool { - return a.is79() -} - -func (a Protocol) is710() bool { - return a.GE(Protocol{7, 10}) -} - -// HasOpenNonSeekable returns whether OpenResponse field Flags flag -// OpenNonSeekable is supported. -func (a Protocol) HasOpenNonSeekable() bool { - return a.is710() -} - -func (a Protocol) is712() bool { - return a.GE(Protocol{7, 12}) -} - -// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest -// field Umask is valid. -func (a Protocol) HasUmask() bool { - return a.is712() -} - -// HasInvalidate returns whether InvalidateNode/InvalidateEntry are -// supported. -func (a Protocol) HasInvalidate() bool { - return a.is712() -} diff --git a/vendor/bazil.org/fuse/unmount.go b/vendor/bazil.org/fuse/unmount.go deleted file mode 100644 index ffe3f155c..000000000 --- a/vendor/bazil.org/fuse/unmount.go +++ /dev/null @@ -1,6 +0,0 @@ -package fuse - -// Unmount tries to unmount the filesystem mounted at dir. -func Unmount(dir string) error { - return unmount(dir) -} diff --git a/vendor/bazil.org/fuse/unmount_linux.go b/vendor/bazil.org/fuse/unmount_linux.go deleted file mode 100644 index 088f0cfee..000000000 --- a/vendor/bazil.org/fuse/unmount_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package fuse - -import ( - "bytes" - "errors" - "os/exec" -) - -func unmount(dir string) error { - cmd := exec.Command("fusermount", "-u", dir) - output, err := cmd.CombinedOutput() - if err != nil { - if len(output) > 0 { - output = bytes.TrimRight(output, "\n") - msg := err.Error() + ": " + string(output) - err = errors.New(msg) - } - return err - } - return nil -} diff --git a/vendor/bazil.org/fuse/unmount_std.go b/vendor/bazil.org/fuse/unmount_std.go deleted file mode 100644 index d6efe276f..000000000 --- a/vendor/bazil.org/fuse/unmount_std.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux - -package fuse - -import ( - "os" - "syscall" -) - -func unmount(dir string) error { - err := syscall.Unmount(dir, 0) - if err != nil { - err = &os.PathError{Op: "unmount", Path: dir, Err: err} - return err - } - return nil -} diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS deleted file mode 100644 index c364af1da..000000000 --- a/vendor/cloud.google.com/go/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Filippo Valsorda -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. -Paweł Knap -Péter Szilágyi -Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS deleted file mode 100644 index 3b3cbed98..000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. 
- -Alexis Hunt -Andreas Litt -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Filippo Valsorda -Glenn Lewis -Ingo Oeser -James Hall -Johan Euphrosine -Jonathan Amsterdam -Kunpei Sakai -Luna Duclos -Magnus Hiie -Mario Castro -Michael McGreevy -Omar Jarjur -Paweł Knap -Péter Szilágyi -Sarah Adams -Thanatat Tamtan -Toby Burress -Tuo Shan -Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
deleted file mode 100644
index 125b7033c..000000000
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2014 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metadata provides access to Google Compute Engine (GCE)
-// metadata and API service accounts.
-//
-// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
-package metadata // import "cloud.google.com/go/compute/metadata"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-)
-
-const (
-	// metadataIP is the documented metadata server IP address.
-	metadataIP = "169.254.169.254"
-
-	// metadataHostEnv is the environment variable specifying the
-	// GCE metadata hostname. If empty, the default value of
-	// metadataIP ("169.254.169.254") is used instead.
-	// This variable name is not defined by any spec, as far as
-	// I know; it was made up for the Go package.
- metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. 
Worst case this should return when the
-	// defaultClient's Transport.ResponseHeaderTimeout or
-	// Transport.Dial.Timeout fires (in two seconds).
-	return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
-	if runtime.GOOS != "linux" {
-		// We don't have any non-Linux clues available, at least yet.
-		return false
-	}
-	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
-	name := strings.TrimSpace(string(slurp))
-	return name == "Google" || name == "Google Compute Engine"
-}
-
-// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
-// ResponseHeaderTimeout).
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
-	return subscribeClient.Subscribe(suffix, fn)
-}
-
-// Get calls Client.Get on the default client.
-func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
-
-// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return defaultClient.ProjectID() }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
-
-// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) { return defaultClient.InternalIP() }
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
-
-// Hostname returns the instance's hostname. This will be of the form
-// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
-
-// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
-
-// InstanceName returns the current VM's instance name string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
-
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
-
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
-
-// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
-func InstanceAttributeValue(attr string) (string, error) {
-	return defaultClient.InstanceAttributeValue(attr)
-}
-
-// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
-func ProjectAttributeValue(attr string) (string, error) {
-	return defaultClient.ProjectAttributeValue(attr)
-}
-
-// Scopes calls Client.Scopes on the default client.
-func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
-
-func strsContains(ss []string, s string) bool {
-	for _, v := range ss {
-		if v == s {
-			return true
-		}
-	}
-	return false
-}
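[Editor's sketch, not part of the patch: typical use of the package-level helpers above, probing OnGCE first and only then querying the metadata service.]

    package main

    import (
    	"log"

    	"cloud.google.com/go/compute/metadata"
    )

    func main() {
    	if !metadata.OnGCE() {
    		log.Println("not on GCE; skipping metadata lookups")
    		return
    	}
    	proj, err := metadata.ProjectID()
    	if err != nil {
    		log.Fatal(err)
    	}
    	zone, err := metadata.Zone()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("project=%s zone=%s", proj, zone)
    }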
-
-// A Client provides metadata.
-type Client struct {
-	hc *http.Client
-}
-
-// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
-// will use the given http.Client instead of the default client.
-func NewClient(c *http.Client) *Client {
-	return &Client{hc: c}
-}
-
-// getETag returns a value from the metadata service as well as the associated ETag.
-// This func is otherwise equivalent to Get.
-func (c *Client) getETag(suffix string) (value, etag string, err error) {
-	// Using a fixed IP makes it very difficult to spoof the metadata service in
-	// a container, which is an important use-case for local testing of cloud
-	// deployments. To enable spoofing of the metadata service, the environment
-	// variable GCE_METADATA_HOST is first inspected to decide where metadata
-	// requests shall go.
-	host := os.Getenv(metadataHostEnv)
-	if host == "" {
-		// Using 169.254.169.254 instead of "metadata" here because Go
-		// binaries built with the "netgo" tag and without cgo won't
-		// know the search suffix for "metadata" is
-		// ".google.internal", and this IP address is documented as
-		// being stable anyway.
-		host = metadataIP
-	}
-	u := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", u, nil)
-	req.Header.Set("Metadata-Flavor", "Google")
-	req.Header.Set("User-Agent", userAgent)
-	res, err := c.hc.Do(req)
-	if err != nil {
-		return "", "", err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == http.StatusNotFound {
-		return "", "", NotDefinedError(suffix)
-	}
-	all, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return "", "", err
-	}
-	if res.StatusCode != 200 {
-		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
-	}
-	return string(all), res.Header.Get("Etag"), nil
-}
-
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func (c *Client) Get(suffix string) (string, error) {
-	val, _, err := c.getETag(suffix)
-	return val, err
-}
-
-func (c *Client) getTrimmed(suffix string) (s string, err error) {
-	s, err = c.Get(suffix)
-	s = strings.TrimSpace(s)
-	return
-}
-
-func (c *Client) lines(suffix string) ([]string, error) {
-	j, err := c.Get(suffix)
-	if err != nil {
-		return nil, err
-	}
-	s := strings.Split(strings.TrimSpace(j), "\n")
-	for i := range s {
-		s[i] = strings.TrimSpace(s[i])
-	}
-	return s, nil
-}
-
-// ProjectID returns the current instance's project ID string.
-func (c *Client) ProjectID() (string, error) { return projID.get(c) }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
-
-// InstanceID returns the current VM's numeric instance ID.
-func (c *Client) InstanceID() (string, error) { return instID.get(c) }
-
-// InternalIP returns the instance's primary internal IP address.
-func (c *Client) InternalIP() (string, error) {
-	return c.getTrimmed("instance/network-interfaces/0/ip")
-}
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func (c *Client) ExternalIP() (string, error) {
-	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
-}
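[Editor's sketch, not part of the patch: the NotDefinedError contract documented on Get above lets a caller treat a 404 as "absent, use a default" rather than a hard failure. The attribute name and fallback value are hypothetical, and this fragment is meant to live inside a function with the metadata and log packages imported.]

    val, err := metadata.InstanceAttributeValue("backup-bucket") // hypothetical attribute
    if _, notDefined := err.(metadata.NotDefinedError); notDefined {
    	val = "default-bucket" // attribute not set on this instance; fall back
    } else if err != nil {
    	log.Fatal(err)
    }
    log.Printf("using bucket %q", val)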
-
-// Hostname returns the instance's hostname. This will be of the form
-// "<instanceID>.c.<projID>.internal".
-func (c *Client) Hostname() (string, error) {
-	return c.getTrimmed("instance/hostname")
-}
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func (c *Client) InstanceTags() ([]string, error) {
-	var s []string
-	j, err := c.Get("instance/tags")
-	if err != nil {
-		return nil, err
-	}
-	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
-		return nil, err
-	}
-	return s, nil
-}
-
-// InstanceName returns the current VM's instance name string.
-func (c *Client) InstanceName() (string, error) {
-	host, err := c.Hostname()
-	if err != nil {
-		return "", err
-	}
-	return strings.Split(host, ".")[0], nil
-}
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func (c *Client) Zone() (string, error) {
-	zone, err := c.getTrimmed("instance/zone")
-	// zone is of the form "projects/<projNum>/zones/<zoneName>".
-	if err != nil {
-		return "", err
-	}
-	return zone[strings.LastIndex(zone, "/")+1:], nil
-}
-
-// InstanceAttributes returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
-
-// ProjectAttributes returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM. The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
-
-// InstanceAttributeValue returns the value of the provided VM
-// instance attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// InstanceAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) InstanceAttributeValue(attr string) (string, error) {
-	return c.Get("instance/attributes/" + attr)
-}
-
-// ProjectAttributeValue returns the value of the provided
-// project attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// ProjectAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) ProjectAttributeValue(attr string) (string, error) {
-	return c.Get("project/attributes/" + attr)
-}
-
-// Scopes returns the service account scopes for the given account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func (c *Client) Scopes(serviceAccount string) ([]string, error) {
-	if serviceAccount == "" {
-		serviceAccount = "default"
-	}
-	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
-}
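[Editor's sketch, not part of the patch: usage of the Subscribe contract spelled out in the doc comment that follows; fn sees each new value, sees ok == false once the value is deleted, and a non-nil return stops the subscription. The attribute name is hypothetical, and this fragment assumes the metadata, errors, and log packages are imported.]

    err := metadata.Subscribe("instance/attributes/app-config", func(v string, ok bool) error {
    	if !ok {
    		return errors.New("app-config attribute was deleted")
    	}
    	log.Printf("new app-config value: %q", v)
    	return nil // keep watching for changes
    })
    if err != nil {
    	log.Print(err)
    }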
-
-// Subscribe subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-// The suffix may contain query parameters.
-//
-// Subscribe calls fn with the latest metadata value indicated by the provided
-// suffix. If the metadata value is deleted, fn is called with the empty string
-// and ok false. Subscribe blocks until fn returns a non-nil error or the value
-// is deleted. Subscribe returns the error value returned from the last call to
-// fn, which may be nil when ok == false.
-func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
-	const failedSubscribeSleep = time.Second * 5
-
-	// First check to see if the metadata value exists at all.
-	val, lastETag, err := c.getETag(suffix)
-	if err != nil {
-		return err
-	}
-
-	if err := fn(val, true); err != nil {
-		return err
-	}
-
-	ok := true
-	if strings.ContainsRune(suffix, '?') {
-		suffix += "&wait_for_change=true&last_etag="
-	} else {
-		suffix += "?wait_for_change=true&last_etag="
-	}
-	for {
-		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
-		if err != nil {
-			if _, deleted := err.(NotDefinedError); !deleted {
-				time.Sleep(failedSubscribeSleep)
-				continue // Retry on other errors.
-			}
-			ok = false
-		}
-		lastETag = etag
-
-		if err := fn(val, ok); err != nil || !ok {
-			return err
-		}
-	}
-}
-
-// Error contains an error response from the server.
-type Error struct {
-	// Code is the HTTP response status code.
-	Code int
-	// Message is the server response message.
-	Message string
-}
-
-func (e *Error) Error() string {
-	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
deleted file mode 100644
index af39a91e7..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE deleted file mode 100644 index 2d1d72608..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Microsoft Azure-SDK-for-Go -Copyright 2014-2017 Microsoft - -This product includes software developed at -the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 459b45831..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Azure Storage SDK for Go (Preview) - -:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the -future. Please use one of the following packages instead. 
- -| Service | Import Path/Repo | -|---------|------------------| -| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) | -| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) | -| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) | - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage -[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane -resources: containers, blobs, tables, and queues. - -To manage storage *accounts* use Azure Resource Manager (ARM) via the packages -at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage). - -This package also supports the [Azure Storage -Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) -(Windows only). - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go deleted file mode 100644 index 8b5b96d48..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ /dev/null @@ -1,91 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "time" -) - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} - -// AppendBlockOptions includes the options for an append block operation -type AppendBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - MaxSize *uint `header:"x-ms-blob-condition-maxsize"` - AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` - ContentMD5 bool -} - -// AppendBlock appends a block to an append blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block -func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { - params := url.Values{"comp": {"appendblock"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.ContentMD5 { - md5sum := md5.Sum(chunk) - headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeAppend) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go deleted file mode 100644 index 76794c305..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ /dev/null @@ -1,246 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
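
To make the append-blob helpers above concrete, a minimal sketch of the create-then-append flow. NewBasicClient is defined in client.go further down in this patch; the account name, key, container, and blob names are placeholders.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	bsc := client.GetBlobService()
	blob := bsc.GetContainerReference("logs").GetBlobReference("events.log")

	// An append blob must be initialized once before blocks are appended.
	if err := blob.PutAppendBlob(nil); err != nil {
		log.Fatal(err)
	}
	// ContentMD5: true makes AppendBlock compute and send the chunk's MD5.
	if err := blob.AppendBlock([]byte("hello\n"), &storage.AppendBlockOptions{ContentMD5: true}); err != nil {
		log.Fatal(err)
	}
}
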
- -import ( - "bytes" - "fmt" - "net/url" - "sort" - "strings" -) - -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services - -type authentication string - -const ( - sharedKey authentication = "sharedKey" - sharedKeyForTable authentication = "sharedKeyTable" - sharedKeyLite authentication = "sharedKeyLite" - sharedKeyLiteForTable authentication = "sharedKeyLiteTable" - - // headers - headerAcceptCharset = "Accept-Charset" - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerDataServiceVersion = "DataServiceVersion" - headerMaxDataServiceVersion = "MaxDataServiceVersion" - headerContentTransferEncoding = "Content-Transfer-Encoding" -) - -func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - if !c.sasClient { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err - } - headers[headerAuthorization] = authHeader - } - return headers, nil -} - -func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth, false) - if err != nil { - return "", err - } - - canString, err := buildCanonicalizedString(verb, headers, canRes, auth) - if err != nil { - return "", err - } - return c.createAuthorizationHeader(canString, auth), nil -} - -func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("") - if c.accountName != StorageEmulatorAccountName || !sas { - cr.WriteString("/") - cr.WriteString(c.getCanonicalizedAccountName()) - } - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if auth == sharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func (c *Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { - contentLength := headers[headerContentLength] - if contentLength == "0" { - contentLength = "" - } - date := headers[headerDate] - if v, ok := headers[headerXmsDate]; ok { - if auth == sharedKey || auth == sharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch auth { - case sharedKey: - canString = strings.Join([]string{ - verb, - headers[headerContentEncoding], - headers[headerContentLanguage], - contentLength, - headers[headerContentMD5], - headers[headerContentType], - date, - headers[headerIfModifiedSince], - headers[headerIfMatch], - headers[headerIfNoneMatch], - headers[headerIfUnmodifiedSince], - headers[headerRange], - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - canonicalizedResource, - }, "\n") - case sharedKeyLite: - canString = strings.Join([]string{ - verb, - headers[headerContentMD5], - headers[headerContentType], - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case sharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("%s authentication is not supported yet", auth) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { - signature := c.computeHmac256(canonicalizedString) - var key string - switch auth { 
- case sharedKey, sharedKeyForTable: - key = "SharedKey" - case sharedKeyLite, sharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go deleted file mode 100644 index 1d2248625..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ /dev/null @@ -1,632 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// A Blob is an entry in BlobListResponse. -type Blob struct { - Container *Container - Name string `xml:"Name"` - Snapshot time.Time `xml:"Snapshot"` - Properties BlobProperties `xml:"Properties"` - Metadata BlobMetadata `xml:"Metadata"` -} - -// PutBlobOptions includes the options any put blob operation -// (page, block, append) -type PutBlobOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobMetadata is a set of custom name/value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx -type BlobMetadata map[string]string - -type blobMetadataEntries struct { - Entries []blobMetadataEntry `xml:",any"` -} -type blobMetadataEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` -} - -// UnmarshalXML converts the xml:Metadata into Metadata map -func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var entries blobMetadataEntries - if err := d.DecodeElement(&entries, &start); err != nil { - return err - } - for _, entry := range entries.Entries { - if *bm == nil { - *bm = make(BlobMetadata) - } - (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value - } - return nil -} - -// MarshalXML implements the xml.Marshaler interface. It encodes -// metadata name/value pairs as they would appear in an Azure -// ListBlobs response. -func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - entries := make([]blobMetadataEntry, 0, len(bm)) - for k, v := range bm { - entries = append(entries, blobMetadataEntry{ - XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, - Value: v, - }) - } - return enc.EncodeElement(blobMetadataEntries{ - Entries: entries, - }, start) -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. 
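
The shared-key signing in authorization.go above hinges on deterministic canonicalization. Since buildCanonicalizedHeader is unexported, here is a standalone re-implementation sketch of just its header rule (keep only x-ms-* headers, lowercase the names, sort, join name:value pairs with newlines), for illustration only.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// canonicalizedHeader mirrors the unexported buildCanonicalizedHeader above.
func canonicalizedHeader(headers map[string]string) string {
	cm := make(map[string]string)
	for k, v := range headers {
		name := strings.TrimSpace(strings.ToLower(k))
		if strings.HasPrefix(name, "x-ms-") {
			cm[name] = v
		}
	}
	keys := make([]string, 0, len(cm))
	for k := range cm {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+":"+cm[k])
	}
	return strings.Join(parts, "\n")
}

func main() {
	fmt.Println(canonicalizedHeader(map[string]string{
		"x-ms-version": "2016-05-31",
		"X-Ms-Date":    "Mon, 02 Mar 2020 10:00:00 GMT",
		"Content-Type": "text/plain", // dropped: not an x-ms-* header
	}))
	// Output:
	// x-ms-date:Mon, 02 Mar 2020 10:00:00 GMT
	// x-ms-version:2016-05-31
}
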
-type BlobProperties struct { - LastModified TimeRFC1123 `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` - ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` - CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` - ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` - ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` - BlobType BlobType `xml:"BlobType"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - ServerEncrypted bool `xml:"ServerEncrypted"` - IncrementalCopy bool `xml:"IncrementalCopy"` -} - -// BlobType defines the type of the Azure Blob. -type BlobType string - -// Types of page blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" - BlobTypeAppend BlobType = "AppendBlob" -) - -func (b *Blob) buildPath() string { - return b.Container.buildPath() + "/" + b.Name -} - -// Exists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b *Blob) Exists() (bool, error) { - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) - headers := b.Container.bsc.client.getStandardHeaders() - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetURL gets the canonical URL to the blob with the specified name in the -// specified container. -// This method does not create a publicly accessible URL if the blob or container -// is private and this method does not check if the blob exists. 
-func (b *Blob) GetURL() string { - container := b.Container.Name - if container == "" { - container = "$root" - } - return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) -} - -// GetBlobRangeOptions includes the options for a get blob range operation -type GetBlobRangeOptions struct { - Range *BlobRange - GetRangeContentMD5 bool - *GetBlobOptions -} - -// GetBlobOptions includes the options for a get blob operation -type GetBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// BlobRange represents the bytes range to be get -type BlobRange struct { - Start uint64 - End uint64 -} - -func (br BlobRange) String() string { - if br.End == 0 { - return fmt.Sprintf("bytes=%d-", br.Start) - } - return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) -} - -// Get returns a stream to read the blob. Caller must call both Read and Close() -// to correctly close the underlying connection. -// -// See the GetRange method for use with a Range header. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { - rangeOptions := GetBlobRangeOptions{ - GetBlobOptions: options, - } - resp, err := b.getRange(&rangeOptions) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - if err := b.writeProperties(resp.Header, true); err != nil { - return resp.Body, err - } - return resp.Body, nil -} - -// GetRange reads the specified range of a blob to a stream. The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// Caller must call both Read and Close()// to correctly close the underlying -// connection. 
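
A short sketch of downloading a whole blob with Get above; note the caller owns closing the returned stream. Names and credentials are placeholders.

package main

import (
	"io"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	bsc := client.GetBlobService()
	blob := bsc.GetContainerReference("media").GetBlobReference("movie.mp4")

	// Get returns the body as a stream; Close it when done. For partial
	// reads, GetRange takes a BlobRange (e.g. {Start: 0, End: 1023}
	// renders as "bytes=0-1023").
	rc, err := blob.Get(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}
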
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob -func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { - resp, err := b.getRange(options) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - // Content-Length header should not be updated, as the service returns the range length - // (which is not alwys the full blob length) - if err := b.writeProperties(resp.Header, false); err != nil { - return resp.Body, err - } - return resp.Body, nil -} - -func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - if options.Range != nil { - headers["Range"] = options.Range.String() - if options.GetRangeContentMD5 { - headers["x-ms-range-get-content-md5"] = "true" - } - } - if options.GetBlobOptions != nil { - headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return nil, err - } - return resp, err -} - -// SnapshotOptions includes the options for a snapshot blob operation -type SnapshotOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// CreateSnapshot creates a snapshot for a blob -// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { - params := url.Values{"comp": {"snapshot"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil || resp == nil { - return nil, err - } - defer drainRespBody(resp) - - if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return nil, err - } - - snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot")) - if snapshotResponse != "" { - snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) - if err != nil { - return nil, err - } - return &snapshotTimestamp, nil - } - - return nil, errors.New("Snapshot not created") -} - -// GetBlobPropertiesOptions includes the options for a get blob properties operation -type GetBlobPropertiesOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetProperties provides 
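
CreateSnapshot above returns the snapshot's timestamp, which is how snapshots are addressed afterwards. A small illustrative fragment (snapshotAndRead is a hypothetical helper; the *storage.Blob is assumed set up as in the earlier sketches):

package example

import (
	"io/ioutil"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// snapshotAndRead takes a snapshot and reads the blob's content as it
// existed at that instant, by passing the timestamp back via GetBlobOptions.
func snapshotAndRead(blob *storage.Blob) ([]byte, error) {
	ts, err := blob.CreateSnapshot(nil)
	if err != nil {
		return nil, err
	}
	rc, err := blob.Get(&storage.GetBlobOptions{Snapshot: ts})
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
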
various information about the specified blob. -// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - return b.writeProperties(resp.Header, true) -} - -func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { - var err error - - contentLength := b.Properties.ContentLength - if includeContentLen { - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return err - } - } - } - - var sequenceNum int64 - sequenceNumStr := h.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return err - } - } - - lastModified, err := getTimeFromHeaders(h, "Last-Modified") - if err != nil { - return err - } - - copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") - if err != nil { - return err - } - - b.Properties = BlobProperties{ - LastModified: TimeRFC1123(*lastModified), - Etag: h.Get("Etag"), - ContentMD5: h.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: h.Get("Content-Encoding"), - ContentType: h.Get("Content-Type"), - ContentDisposition: h.Get("Content-Disposition"), - CacheControl: h.Get("Cache-Control"), - ContentLanguage: h.Get("Content-Language"), - SequenceNumber: sequenceNum, - CopyCompletionTime: TimeRFC1123(*copyCompletionTime), - CopyStatusDescription: h.Get("x-ms-copy-status-description"), - CopyID: h.Get("x-ms-copy-id"), - CopyProgress: h.Get("x-ms-copy-progress"), - CopySource: h.Get("x-ms-copy-source"), - CopyStatus: h.Get("x-ms-copy-status"), - BlobType: BlobType(h.Get("x-ms-blob-type")), - LeaseStatus: h.Get("x-ms-lease-status"), - LeaseState: h.Get("x-ms-lease-state"), - } - b.writeMetadata(h) - return nil -} - -// SetBlobPropertiesOptions contains various properties of a blob and is an entry -// in SetProperties -type SetBlobPropertiesOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - Origin string `header:"Origin"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - SequenceNumberAction *SequenceNumberAction - RequestID string `header:"x-ms-client-request-id"` -} - -// SequenceNumberAction defines how the blob's sequence number should be modified -type SequenceNumberAction string - -// Options for sequence number action -const ( - SequenceNumberActionMax SequenceNumberAction = "max" - SequenceNumberActionUpdate SequenceNumberAction = "update" - SequenceNumberActionIncrement SequenceNumberAction = "increment" -) - -// SetProperties replaces the BlobHeaders for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. 
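
As a sketch of the GetProperties flow above: the call issues a HEAD request and repopulates blob.Properties from the response headers, so fields can be read directly afterwards (refreshSize is a hypothetical helper).

package example

import "github.com/Azure/azure-sdk-for-go/storage"

// refreshSize reloads the blob's server-side properties and returns the
// current content length.
func refreshSize(blob *storage.Blob) (int64, error) {
	if err := blob.GetProperties(nil); err != nil {
		return 0, err
	}
	return blob.Properties.ContentLength, nil
}
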
All keys -// are returned in lower case by GetBlobProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties -func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { - params := url.Values{"comp": {"properties"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - if b.Properties.BlobType == BlobTypePage { - headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) - if options != nil && options.SequenceNumberAction != nil { - headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) - if *options.SequenceNumberAction != SequenceNumberActionIncrement { - headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) - } - } - } - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// SetBlobMetadataOptions includes the options for a set blob metadata operation -type SetBlobMetadataOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetMetadata replaces the metadata for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetBlobMetadataOptions includes the options for a get blob metadata operation -type GetBlobMetadataOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetMetadata returns all user-defined metadata for the specified blob. 
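
SetProperties and SetMetadata above both send whatever is currently held on the Blob struct. An illustrative fragment (tagBlob is a hypothetical helper):

package example

import "github.com/Azure/azure-sdk-for-go/storage"

// tagBlob sets HTTP-level properties and user metadata. SetProperties sends
// the contents of blob.Properties via their header tags; SetMetadata sends
// blob.Metadata as x-ms-meta-* headers.
func tagBlob(blob *storage.Blob) error {
	blob.Properties.ContentType = "application/json"
	blob.Properties.CacheControl = "max-age=3600"
	if err := blob.SetProperties(nil); err != nil {
		return err
	}
	blob.Metadata = storage.BlobMetadata{"origin": "example"}
	return blob.SetMetadata(nil)
}
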
-// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - b.writeMetadata(resp.Header) - return nil -} - -func (b *Blob) writeMetadata(h http.Header) { - b.Metadata = BlobMetadata(writeMetadata(h)) -} - -// DeleteBlobOptions includes the options for a delete blob operation -type DeleteBlobOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - DeleteSnapshots *bool - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the given blob from the specified container. -// If the blob does not exists at the time of the Delete Blob operation, it -// returns error. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) Delete(options *DeleteBlobOptions) error { - resp, err := b.delete(options) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the given blob from the specified container If the -// blob is deleted with this call, returns true. Otherwise returns false. 
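
A sketch combining GetMetadata above with DeleteIfExists (shown just below): metadata keys come back lower-cased, and a true *DeleteSnapshots maps to the "x-ms-delete-snapshots: include" header set in delete(). dumpAndDelete is a hypothetical helper.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// dumpAndDelete prints the user metadata, then removes the blob together
// with its snapshots; DeleteIfExists reports false without error if the
// blob was already gone.
func dumpAndDelete(blob *storage.Blob) error {
	if err := blob.GetMetadata(nil); err != nil {
		return err
	}
	fmt.Println(blob.Metadata) // keys are returned lower-cased

	withSnapshots := true
	_, err := blob.DeleteIfExists(&storage.DeleteBlobOptions{
		DeleteSnapshots: &withSnapshots,
	})
	return err
}
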
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob -func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { - resp, err := b.delete(options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - if options.DeleteSnapshots != nil { - if *options.DeleteSnapshots { - headers["x-ms-delete-snapshots"] = "include" - } else { - headers["x-ms-delete-snapshots"] = "only" - } - } - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) -} - -// helper method to construct the path to either a blob or container -func pathForResource(container, name string) string { - if name != "" { - return fmt.Sprintf("/%s/%s", container, name) - } - return fmt.Sprintf("/%s", container) -} - -func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error { - defer drainRespBody(resp) - err := checkRespCode(resp, []int{http.StatusCreated}) - if err != nil { - return err - } - b.Properties.BlobType = bt - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go deleted file mode 100644 index 31894dbfc..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ /dev/null @@ -1,174 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// OverrideHeaders defines overridable response heaedrs in -// a request using a SAS URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type OverrideHeaders struct { - CacheControl string - ContentDisposition string - ContentEncoding string - ContentLanguage string - ContentType string -} - -// BlobSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type BlobSASOptions struct { - BlobServiceSASPermissions - OverrideHeaders - SASOptions -} - -// BlobServiceSASPermissions includes the available permissions for -// blob service SAS URI. 
-type BlobServiceSASPermissions struct { - Read bool - Add bool - Create bool - Write bool - Delete bool -} - -func (p BlobServiceSASPermissions) buildString() string { - permissions := "" - if p.Read { - permissions += "r" - } - if p.Add { - permissions += "a" - } - if p.Create { - permissions += "c" - } - if p.Write { - permissions += "w" - } - if p.Delete { - permissions += "d" - } - return permissions -} - -// GetSASURI creates an URL to the blob which contains the Shared -// Access Signature with the specified options. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { - uri := b.GetURL() - signedResource := "b" - canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) - if err != nil { - return "", err - } - - permissions := options.BlobServiceSASPermissions.buildString() - return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { - start := "" - if options.Start != (time.Time{}) { - start = options.Start.UTC().Format(time.RFC3339) - } - - expiry := options.Expiry.UTC().Format(time.RFC3339) - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - protocols := "" - if options.UseHTTPS { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) - if err != nil { - return "", err - } - - sig := c.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {c.apiVersion}, - "se": {expiry}, - "sr": {signedResource}, - "sp": {permissions}, - "sig": {sig}, - } - - if start != "" { - sasParams.Add("st", start) - } - - if c.apiVersion >= "2015-04-05" { - if protocols != "" { - sasParams.Add("spr", protocols) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - } - - // Add override response hedaers - addQueryParameter(sasParams, "rscc", headers.CacheControl) - addQueryParameter(sasParams, "rscd", headers.ContentDisposition) - addQueryParameter(sasParams, "rsce", headers.ContentEncoding) - addQueryParameter(sasParams, "rscl", headers.ContentLanguage) - addQueryParameter(sasParams, "rsct", headers.ContentType) - - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { - rscc := headers.CacheControl - rscd := headers.ContentDisposition - rsce := headers.ContentEncoding - rscl := headers.ContentLanguage - rsct := headers.ContentType - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return 
fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go deleted file mode 100644 index 02fa5929c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ /dev/null @@ -1,186 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties -func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { - return b.client.getServiceProperties(blobServiceName, b.auth) -} - -// SetServiceProperties sets the properties of your storage account's blob service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties -func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { - return b.client.setServiceProperties(props, blobServiceName, b.auth) -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// GetContainerReference returns a Container object for the specified container name. 
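
To tie the SAS machinery above together, a sketch of producing a short-lived, HTTPS-only, read-only link with GetSASURI. SASOptions itself is defined in commonsasuri.go (also removed by this patch); the Expiry and UseHTTPS fields used here are the ones blobAndFileSASURI reads above. readOnlyLink is a hypothetical helper.

package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// readOnlyLink returns a signed URL granting read access for one hour.
func readOnlyLink(blob *storage.Blob) (string, error) {
	return blob.GetSASURI(storage.BlobSASOptions{
		BlobServiceSASPermissions: storage.BlobServiceSASPermissions{Read: true},
		SASOptions: storage.SASOptions{
			Expiry:   time.Now().Add(time.Hour),
			UseHTTPS: true,
		},
	})
}
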
-func (b *BlobStorageClient) GetContainerReference(name string) *Container { - return &Container{ - bsc: b, - Name: name, - } -} - -// GetContainerReferenceFromSASURI returns a Container object for the specified -// container SASURI -func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { - path := strings.Split(sasuri.Path, "/") - if len(path) <= 1 { - return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) - } - c, err := newSASClientFromURL(&sasuri) - if err != nil { - return nil, err - } - cli := c.GetBlobService() - return &Container{ - bsc: &cli, - Name: path[1], - sasuri: sasuri, - }, nil -} - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - type ContainerAlias struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - Metadata BlobMetadata - sasuri url.URL - } - type ContainerListResponseAlias struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []ContainerAlias `xml:"Containers>Container"` - } - - var outAlias ContainerListResponseAlias - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - err = xmlUnmarshal(resp.Body, &outAlias) - if err != nil { - return nil, err - } - - out := ContainerListResponse{ - XMLName: outAlias.XMLName, - Xmlns: outAlias.Xmlns, - Prefix: outAlias.Prefix, - Marker: outAlias.Marker, - NextMarker: outAlias.NextMarker, - MaxResults: outAlias.MaxResults, - Containers: make([]Container, len(outAlias.Containers)), - } - for i, cnt := range outAlias.Containers { - out.Containers[i] = Container{ - bsc: &b, - Name: cnt.Name, - Properties: cnt.Properties, - Metadata: map[string]string(cnt.Metadata), - sasuri: cnt.sasuri, - } - } - - return &out, err -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func writeMetadata(h http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range h { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
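
ListContainers above returns one page at a time; NextMarker feeds the next call's Marker until it comes back empty. A sketch of the full pagination loop (listAll is a hypothetical helper):

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// listAll walks every container in the account, following NextMarker.
func listAll(bsc storage.BlobStorageClient) error {
	params := storage.ListContainersParameters{MaxResults: 100}
	for {
		resp, err := bsc.ListContainers(params)
		if err != nil {
			return err
		}
		for _, c := range resp.Containers {
			fmt.Println(c.Name)
		}
		if resp.NextMarker == "" {
			return nil
		}
		params.Marker = resp.NextMarker
	}
}
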
- k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go deleted file mode 100644 index c9c62d799..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ /dev/null @@ -1,270 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 100 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See CreateBlockBlobFromReader for more info on creating blobs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { - return b.CreateBlockBlobFromReader(nil, options) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// Any headers set in blob.Properties or metadata in blob.Metadata -// will be set on the blob. 
-// -// The API rejects requests with size > 256 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. -// -// To create a blob from scratch, call container.GetBlobReference() to -// get an empty blob, fill in blob.Properties and blob.Metadata as -// appropriate then call this method. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - - headers["Content-Length"] = "0" - var n int64 - var err error - if blob != nil { - type lener interface { - Len() int - } - // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. - if l, ok := blob.(lener); ok { - n = int64(l.Len()) - } else { - var buf bytes.Buffer - n, err = io.Copy(&buf, blob) - if err != nil { - return err - } - blob = &buf - } - - headers["Content-Length"] = strconv.FormatInt(n, 10) - } - b.Properties.ContentLength = n - - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockOptions includes the options for a put block operation -type PutBlockOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - ContentMD5 string `header:"Content-MD5"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 100 MiB (but this limit is not -// checked by the SDK). -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { - return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 100 MiB (but this limit is not -// checked by the SDK). 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block -func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { - query := url.Values{ - "comp": {"block"}, - "blockid": {blockID}, - } - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", size) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypeBlock) -} - -// PutBlockListOptions includes the options for a put block list operation -type PutBlockListOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List -func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { - params := url.Values{"comp": {"blocklist"}} - blockListXML := prepareBlockListRequest(blocks) - headers := b.Container.bsc.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// GetBlockListOptions includes the options for a get block list operation -type GetBlockListOptions struct { - Timeout uint - Snapshot *time.Time - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List -func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { - params := url.Values{ - "comp": {"blocklist"}, - "blocklisttype": {string(blockType)}, - } - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out BlockListResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go deleted file mode 100644 index 427558b5d..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ /dev/null @@ -1,988 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/version" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests in the - // public cloud when a default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. 
-	DefaultAPIVersion = "2016-05-31"
-
-	defaultUseHTTPS      = true
-	defaultRetryAttempts = 5
-	defaultRetryDuration = time.Second * 5
-
-	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
-	StorageEmulatorAccountName = "devstoreaccount1"
-
-	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
-	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
-
-	blobServiceName  = "blob"
-	tableServiceName = "table"
-	queueServiceName = "queue"
-	fileServiceName  = "file"
-
-	storageEmulatorBlob  = "127.0.0.1:10000"
-	storageEmulatorTable = "127.0.0.1:10002"
-	storageEmulatorQueue = "127.0.0.1:10001"
-
-	userAgentHeader = "User-Agent"
-
-	userDefinedMetadataHeaderPrefix = "x-ms-meta-"
-
-	connectionStringAccountName      = "accountname"
-	connectionStringAccountKey       = "accountkey"
-	connectionStringEndpointSuffix   = "endpointsuffix"
-	connectionStringEndpointProtocol = "defaultendpointsprotocol"
-
-	connectionStringBlobEndpoint  = "blobendpoint"
-	connectionStringFileEndpoint  = "fileendpoint"
-	connectionStringQueueEndpoint = "queueendpoint"
-	connectionStringTableEndpoint = "tableendpoint"
-	connectionStringSAS           = "sharedaccesssignature"
-)
-
-var (
-	validStorageAccount     = regexp.MustCompile("^[0-9a-z]{3,24}$")
-	defaultValidStatusCodes = []int{
-		http.StatusRequestTimeout,      // 408
-		http.StatusInternalServerError, // 500
-		http.StatusBadGateway,          // 502
-		http.StatusServiceUnavailable,  // 503
-		http.StatusGatewayTimeout,      // 504
-	}
-)
-
-// Sender sends a request
-type Sender interface {
-	Send(*Client, *http.Request) (*http.Response, error)
-}
-
-// DefaultSender is the default sender for the client. It implements
-// an automatic retry strategy.
-type DefaultSender struct {
-	RetryAttempts    int
-	RetryDuration    time.Duration
-	ValidStatusCodes []int
-	attempts         int // used for testing
-}
-
-// Send is the default retry strategy in the client
-func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
-	rr := autorest.NewRetriableRequest(req)
-	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
-		err = rr.Prepare()
-		if err != nil {
-			return resp, err
-		}
-		resp, err = c.HTTPClient.Do(rr.Request())
-		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
-			return resp, err
-		}
-		drainRespBody(resp)
-		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
-		ds.attempts = attempts
-	}
-	ds.attempts++
-	return resp, err
-}
-
-// Client is the object that needs to be constructed to perform
-// operations on the storage account.
-type Client struct {
-	// HTTPClient is the http.Client used to initiate API
-	// requests. http.DefaultClient is used when creating a
-	// client.
-	HTTPClient *http.Client
-
-	// Sender is an interface that sends the request. Clients are
-	// created with a DefaultSender. The DefaultSender has an
-	// automatic retry strategy built in. The Sender can be customized.
-	Sender Sender
-
-	accountName      string
-	accountKey       []byte
-	useHTTPS         bool
-	UseSharedKeyLite bool
-	baseURL          string
-	apiVersion       string
-	userAgent        string
-	sasClient        bool
-	accountSASToken  url.Values
-}
-
-type odataResponse struct {
-	resp  *http.Response
-	odata odataErrorWrapper
-}
-
-// AzureStorageServiceError contains fields of the error response from
-// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
-// Some fields might be specific to certain calls.
-type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - Lang string - StatusCode int - RequestID string - Date string - APIVersion string -} - -type odataErrorMessage struct { - Lang string `json:"lang"` - Value string `json:"value"` -} - -type odataError struct { - Code string `json:"code"` - Message odataErrorMessage `json:"message"` -} - -type odataErrorWrapper struct { - Err odataError `json:"odata.error"` -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int - inner error -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner) -} - -// Got is the actual status code returned by Azure. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// Inner returns any inner error info. -func (e UnexpectedStatusCodeError) Inner() error { - return e.inner -} - -// NewClientFromConnectionString creates a Client from the connection string. -func NewClientFromConnectionString(input string) (Client, error) { - // build a map of connection string key/value pairs - parts := map[string]string{} - for _, pair := range strings.Split(input, ";") { - if pair == "" { - continue - } - - equalDex := strings.IndexByte(pair, '=') - if equalDex <= 0 { - return Client{}, fmt.Errorf("Invalid connection segment %q", pair) - } - - value := strings.TrimSpace(pair[equalDex+1:]) - key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) - parts[key] = value - } - - // TODO: validate parameter sets? - - if parts[connectionStringAccountName] == StorageEmulatorAccountName { - return NewEmulatorClient() - } - - if parts[connectionStringSAS] != "" { - endpoint := "" - if parts[connectionStringBlobEndpoint] != "" { - endpoint = parts[connectionStringBlobEndpoint] - } else if parts[connectionStringFileEndpoint] != "" { - endpoint = parts[connectionStringFileEndpoint] - } else if parts[connectionStringQueueEndpoint] != "" { - endpoint = parts[connectionStringQueueEndpoint] - } else { - endpoint = parts[connectionStringTableEndpoint] - } - - return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS]) - } - - useHTTPS := defaultUseHTTPS - if parts[connectionStringEndpointProtocol] != "" { - useHTTPS = parts[connectionStringEndpointProtocol] == "https" - } - - return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey], - parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS) -} - -// NewBasicClient constructs a Client with given storage service name and -// key. 
-func NewBasicClient(accountName, accountKey string) (Client, error) {
-	if accountName == StorageEmulatorAccountName {
-		return NewEmulatorClient()
-	}
-	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
-}
-
-// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
-// key in the referenced cloud.
-func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
-	if accountName == StorageEmulatorAccountName {
-		return NewEmulatorClient()
-	}
-	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
-}
-
-// NewEmulatorClient constructs a Client intended to work only with the Azure
-// Storage Emulator
-func NewEmulatorClient() (Client, error) {
-	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
-}
-
-// NewClient constructs a Client. This should be used if the caller wants
-// to specify whether to use HTTPS, a specific REST API version or a custom
-// storage endpoint other than Azure Public Cloud.
-func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
-	var c Client
-	if !IsValidStorageAccount(accountName) {
-		return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
-	} else if accountKey == "" {
-		return c, fmt.Errorf("azure: account key required")
-	} else if serviceBaseURL == "" {
-		return c, fmt.Errorf("azure: base storage service url required")
-	}
-
-	key, err := base64.StdEncoding.DecodeString(accountKey)
-	if err != nil {
-		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
-	}
-
-	c = Client{
-		HTTPClient:       http.DefaultClient,
-		accountName:      accountName,
-		accountKey:       key,
-		useHTTPS:         useHTTPS,
-		baseURL:          serviceBaseURL,
-		apiVersion:       apiVersion,
-		sasClient:        false,
-		UseSharedKeyLite: false,
-		Sender: &DefaultSender{
-			RetryAttempts:    defaultRetryAttempts,
-			ValidStatusCodes: defaultValidStatusCodes,
-			RetryDuration:    defaultRetryDuration,
-		},
-	}
-	c.userAgent = c.getDefaultUserAgent()
-	return c, nil
-}
-
-// IsValidStorageAccount checks if the storage account name is valid.
-// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
-func IsValidStorageAccount(account string) bool {
-	return validStorageAccount.MatchString(account)
-}
-
-// NewAccountSASClient constructs a client that uses accountSAS authorization
-// for its operations.
-func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
-	return newSASClient(account, env.StorageEndpointSuffix, token)
-}
-
-// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
-// for its operations using the specified endpoint and SAS token.
-func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) { - u, err := url.Parse(endpoint) - if err != nil { - return Client{}, err - } - _, err = url.ParseQuery(sasToken) - if err != nil { - return Client{}, err - } - u.RawQuery = sasToken - return newSASClientFromURL(u) -} - -func newSASClient(accountName, baseURL string, sasToken url.Values) Client { - c := Client{ - HTTPClient: http.DefaultClient, - apiVersion: DefaultAPIVersion, - sasClient: true, - Sender: &DefaultSender{ - RetryAttempts: defaultRetryAttempts, - ValidStatusCodes: defaultValidStatusCodes, - RetryDuration: defaultRetryDuration, - }, - accountName: accountName, - baseURL: baseURL, - accountSASToken: sasToken, - } - c.userAgent = c.getDefaultUserAgent() - // Get API version and protocol from token - c.apiVersion = sasToken.Get("sv") - c.useHTTPS = sasToken.Get("spr") == "https" - return c -} - -func newSASClientFromURL(u *url.URL) (Client, error) { - // the host name will look something like this - // - foo.blob.core.windows.net - // "foo" is the account name - // "core.windows.net" is the baseURL - - // find the first dot to get account name - i1 := strings.IndexByte(u.Host, '.') - if i1 < 0 { - return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host) - } - - // now find the second dot to get the base URL - i2 := strings.IndexByte(u.Host[i1+1:], '.') - if i2 < 0 { - return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:]) - } - - sasToken := u.Query() - c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken) - if spr := sasToken.Get("spr"); spr == "" { - // infer from URL if not in the query params set - c.useHTTPS = u.Scheme == "https" - } - return c, nil -} - -func (c Client) isServiceSASClient() bool { - return c.sasClient && c.accountSASToken == nil -} - -func (c Client) isAccountSASClient() bool { - return c.sasClient && c.accountSASToken != nil -} - -func (c Client) getDefaultUserAgent() string { - return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - version.Number, - c.apiVersion, - ) -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) -} - -// protectUserAgent is used in funcs that include extraheaders as a parameter. -// It prevents the User-Agent header to be overwritten, instead if it happens to -// be present, it gets added to the current User-Agent. 
Use it before getStandardHeaders.
-func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
-	if v, ok := extraheaders[userAgentHeader]; ok {
-		c.AddToUserAgent(v)
-		delete(extraheaders, userAgentHeader)
-	}
-	return extraheaders
-}
-
-func (c Client) getBaseURL(service string) *url.URL {
-	scheme := "http"
-	if c.useHTTPS {
-		scheme = "https"
-	}
-	host := ""
-	if c.accountName == StorageEmulatorAccountName {
-		switch service {
-		case blobServiceName:
-			host = storageEmulatorBlob
-		case tableServiceName:
-			host = storageEmulatorTable
-		case queueServiceName:
-			host = storageEmulatorQueue
-		}
-	} else {
-		host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
-	}
-
-	return &url.URL{
-		Scheme: scheme,
-		Host:   host,
-	}
-}
-
-func (c Client) getEndpoint(service, path string, params url.Values) string {
-	u := c.getBaseURL(service)
-
-	// API doesn't accept path segments not starting with '/'
-	if !strings.HasPrefix(path, "/") {
-		path = fmt.Sprintf("/%v", path)
-	}
-
-	if c.accountName == StorageEmulatorAccountName {
-		path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
-	}
-
-	u.Path = path
-	u.RawQuery = params.Encode()
-	return u.String()
-}
-
-// AccountSASTokenOptions includes options for constructing
-// an account SAS token.
-// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
-type AccountSASTokenOptions struct {
-	APIVersion    string
-	Services      Services
-	ResourceTypes ResourceTypes
-	Permissions   Permissions
-	Start         time.Time
-	Expiry        time.Time
-	IP            string
-	UseHTTPS      bool
-}
-
-// Services specify services accessible with an account SAS.
-type Services struct {
-	Blob  bool
-	Queue bool
-	Table bool
-	File  bool
-}
-
-// ResourceTypes specify the resources accessible with an
-// account SAS.
-type ResourceTypes struct {
-	Service   bool
-	Container bool
-	Object    bool
-}
-
-// Permissions specifies permissions for an accountSAS.
-type Permissions struct {
-	Read    bool
-	Write   bool
-	Delete  bool
-	List    bool
-	Add     bool
-	Create  bool
-	Update  bool
-	Process bool
-}
-
-// GetAccountSASToken creates an account SAS token.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
-func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
-	if options.APIVersion == "" {
-		options.APIVersion = c.apiVersion
-	}
-
-	if options.APIVersion < "2015-04-05" {
-		return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. 
API version : %s", options.APIVersion) - } - - // build services string - services := "" - if options.Services.Blob { - services += "b" - } - if options.Services.Queue { - services += "q" - } - if options.Services.Table { - services += "t" - } - if options.Services.File { - services += "f" - } - - // build resources string - resources := "" - if options.ResourceTypes.Service { - resources += "s" - } - if options.ResourceTypes.Container { - resources += "c" - } - if options.ResourceTypes.Object { - resources += "o" - } - - // build permissions string - permissions := "" - if options.Permissions.Read { - permissions += "r" - } - if options.Permissions.Write { - permissions += "w" - } - if options.Permissions.Delete { - permissions += "d" - } - if options.Permissions.List { - permissions += "l" - } - if options.Permissions.Add { - permissions += "a" - } - if options.Permissions.Create { - permissions += "c" - } - if options.Permissions.Update { - permissions += "u" - } - if options.Permissions.Process { - permissions += "p" - } - - // build start time, if exists - start := "" - if options.Start != (time.Time{}) { - start = options.Start.UTC().Format(time.RFC3339) - } - - // build expiry time - expiry := options.Expiry.UTC().Format(time.RFC3339) - - protocol := "https,http" - if options.UseHTTPS { - protocol = "https" - } - - stringToSign := strings.Join([]string{ - c.accountName, - permissions, - services, - resources, - start, - expiry, - options.IP, - protocol, - options.APIVersion, - "", - }, "\n") - signature := c.computeHmac256(stringToSign) - - sasParams := url.Values{ - "sv": {options.APIVersion}, - "ss": {services}, - "srt": {resources}, - "sp": {permissions}, - "se": {expiry}, - "spr": {protocol}, - "sig": {signature}, - } - if start != "" { - sasParams.Add("st", start) - } - if options.IP != "" { - sasParams.Add("sip", options.IP) - } - - return sasParams, nil -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - b := BlobStorageClient{ - client: c, - } - b.client.AddToUserAgent(blobServiceName) - b.auth = sharedKey - if c.UseSharedKeyLite { - b.auth = sharedKeyLite - } - return b -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - q := QueueServiceClient{ - client: c, - } - q.client.AddToUserAgent(queueServiceName) - q.auth = sharedKey - if c.UseSharedKeyLite { - q.auth = sharedKeyLite - } - return q -} - -// GetTableService returns a TableServiceClient which can operate on the table -// service of the storage account. -func (c Client) GetTableService() TableServiceClient { - t := TableServiceClient{ - client: c, - } - t.client.AddToUserAgent(tableServiceName) - t.auth = sharedKeyForTable - if c.UseSharedKeyLite { - t.auth = sharedKeyLiteForTable - } - return t -} - -// GetFileService returns a FileServiceClient which can operate on the file -// service of the storage account. 
-func (c Client) GetFileService() FileServiceClient { - f := FileServiceClient{ - client: c, - } - f.client.AddToUserAgent(fileServiceName) - f.auth = sharedKey - if c.UseSharedKeyLite { - f.auth = sharedKeyLite - } - return f -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - userAgentHeader: c.userAgent, - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, errors.New("azure/storage: error creating request: " + err.Error()) - } - - // http.NewRequest() will automatically set req.ContentLength for a handful of types - // otherwise we will handle here. - if req.ContentLength < 1 { - if clstr, ok := headers["Content-Length"]; ok { - if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { - req.ContentLength = cl - } - } - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 - } - - if c.isAccountSASClient() { - // append the SAS token to the query params - v := req.URL.Query() - v = mergeParams(v, c.accountSASToken) - req.URL.RawQuery = v.Encode() - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, err - } - - if resp.StatusCode >= 400 && resp.StatusCode <= 505 { - return resp, getErrorFromResponse(resp) - } - - return resp, nil -} - -func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { - headers, err := c.addAuthorizationHeader(verb, url, headers, auth) - if err != nil { - return nil, nil, nil, err - } - - req, err := http.NewRequest(verb, url, body) - for k, v := range headers { - req.Header.Add(k, v) - } - - resp, err := c.Sender.Send(&c, req) - if err != nil { - return nil, nil, nil, err - } - - respToRet := &odataResponse{resp: resp} - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, nil, nil, err - } - - requestID, date, version := getDebugHeaders(resp.Header) - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) - return respToRet, req, resp, err - } - // try unmarshal as odata.error json - err = json.Unmarshal(respBody, &respToRet.odata) - } - - return respToRet, req, resp, err -} - -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) - return respToRet, err -} - -func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { - // execute common query, get back generated request, response etc... for more processing. 
-	respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
-	if err != nil {
-		return nil, err
-	}
-
-	// return the OData in the case of executing batch commands.
-	// In this case we need to read the outer batch boundary and contents.
-	// Then we read the changeset information within the batch
-	var respBody []byte
-	respBody, err = readAndCloseBody(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	// outer multipart body
-	_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
-	if err != nil {
-		return nil, err
-	}
-
-	// batch details.
-	batchBoundary := batchHeader["boundary"]
-	batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
-	if err != nil {
-		return nil, err
-	}
-
-	// changeset details.
-	err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
-	if err != nil {
-		return nil, err
-	}
-
-	return respToRet, nil
-}
-
-func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
-	changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
-	changesetPart, err := changesetMultiReader.NextPart()
-	if err != nil {
-		return err
-	}
-
-	changesetPartBufioReader := bufio.NewReader(changesetPart)
-	changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
-	if err != nil {
-		return err
-	}
-
-	if changesetResp.StatusCode != http.StatusNoContent {
-		changesetBody, err := readAndCloseBody(changesetResp.Body)
-		if err != nil {
-			return err
-		}
-		err = json.Unmarshal(changesetBody, &respToRet.odata)
-		if err != nil {
-			return err
-		}
-		respToRet.resp = changesetResp
-	}
-
-	return nil
-}
-
-func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
-	respBodyString := string(respBody)
-	respBodyReader := strings.NewReader(respBodyString)
-
-	// reading batchresponse
-	batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
-	batchPart, err := batchMultiReader.NextPart()
-	if err != nil {
-		return nil, "", err
-	}
-	batchPartBufioReader := bufio.NewReader(batchPart)
-
-	_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
-	if err != nil {
-		return nil, "", err
-	}
-	changesetBoundary := changesetHeader["boundary"]
-	return batchPartBufioReader, changesetBoundary, nil
-}
-
-func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
-	defer body.Close()
-	out, err := ioutil.ReadAll(body)
-	if err == io.EOF {
-		err = nil
-	}
-	return out, err
-}
-
-// reads the response body then closes it
-func drainRespBody(resp *http.Response) {
-	io.Copy(ioutil.Discard, resp.Body)
-	resp.Body.Close()
-}
-
-func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
-	if err := xml.Unmarshal(body, storageErr); err != nil {
-		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
-		return err
-	}
-	return nil
-}
-
-func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
-	odataError := odataErrorWrapper{}
-	if err := json.Unmarshal(body, &odataError); err != nil {
-		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
-		return err
-	}
-	storageErr.Code = odataError.Err.Code
-	storageErr.Message = odataError.Err.Message.Value
-	storageErr.Lang = odataError.Err.Message.Lang
-	return nil
-}
-
-func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
-	return AzureStorageServiceError{
-		StatusCode: code,
-		Code:       status,
-		RequestID:  requestID,
-		Date:       date,
-		APIVersion: version,
-		Message:    "no response body was available for error status code",
-	}
-}
-
-func (e AzureStorageServiceError) Error() string {
-	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
-		e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
-}
-
-// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
-// one of the allowed status codes; otherwise nil.
-func checkRespCode(resp *http.Response, allowed []int) error {
-	for _, v := range allowed {
-		if resp.StatusCode == v {
-			return nil
-		}
-	}
-	err := getErrorFromResponse(resp)
-	return UnexpectedStatusCodeError{
-		allowed: allowed,
-		got:     resp.StatusCode,
-		inner:   err,
-	}
-}
-
-func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
-	metadata = c.protectUserAgent(metadata)
-	for k, v := range metadata {
-		h[userDefinedMetadataHeaderPrefix+k] = v
-	}
-	return h
-}
-
-func getDebugHeaders(h http.Header) (requestID, date, version string) {
-	requestID = h.Get("x-ms-request-id")
-	version = h.Get("x-ms-version")
-	date = h.Get("Date")
-	return
-}
-
-func getErrorFromResponse(resp *http.Response) error {
-	respBody, err := readAndCloseBody(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	requestID, date, version := getDebugHeaders(resp.Header)
-	if len(respBody) == 0 {
-		// no error in response body, might happen in HEAD requests
-		err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
-	} else {
-		storageErr := AzureStorageServiceError{
-			StatusCode: resp.StatusCode,
-			RequestID:  requestID,
-			Date:       date,
-			APIVersion: version,
-		}
-		// response contains storage service error object, unmarshal
-		if resp.Header.Get("Content-Type") == "application/xml" {
-			errIn := serviceErrFromXML(respBody, &storageErr)
-			if errIn != nil { // error unmarshaling the error response
-				err = errIn
-			}
-		} else {
-			errIn := serviceErrFromJSON(respBody, &storageErr)
-			if errIn != nil { // error unmarshaling the error response
-				err = errIn
-			}
-		}
-		err = storageErr
-	}
-	return err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
deleted file mode 100644
index e898e9bfa..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/url" - "time" -) - -// SASOptions includes options used by SAS URIs for different -// services and resources. -type SASOptions struct { - APIVersion string - Start time.Time - Expiry time.Time - IP string - UseHTTPS bool - Identifier string -} - -func addQueryParameter(query url.Values, key, value string) url.Values { - if value != "" { - query.Add(key, value) - } - return query -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go deleted file mode 100644 index 056473d49..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ /dev/null @@ -1,640 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// Container represents an Azure container. -type Container struct { - bsc *BlobStorageClient - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - Metadata map[string]string - sasuri url.URL -} - -// Client returns the HTTP client used by the Container reference. -func (c *Container) Client() *Client { - return &c.bsc.client -} - -func (c *Container) buildPath() string { - return fmt.Sprintf("/%s", c.Name) -} - -// GetURL gets the canonical URL to the container. -// This method does not create a publicly accessible URL if the container -// is private and this method does not check if the blob exists. -func (c *Container) GetURL() string { - container := c.Name - if container == "" { - container = "$root" - } - return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) -} - -// ContainerSASOptions are options to construct a container SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type ContainerSASOptions struct { - ContainerSASPermissions - OverrideHeaders - SASOptions -} - -// ContainerSASPermissions includes the available permissions for -// a container SAS URI. -type ContainerSASPermissions struct { - BlobServiceSASPermissions - List bool -} - -// GetSASURI creates an URL to the container which contains the Shared -// Access Signature with the specified options. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { - uri := c.GetURL() - signedResource := "c" - canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) - if err != nil { - return "", err - } - - // build permissions string - permissions := options.BlobServiceSASPermissions.buildString() - if options.List { - permissions += "l" - } - - return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - PublicAccess ContainerAccessType `xml:"PublicAccess"` -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// IncludeBlobDataset has options to include in a list blobs operation -type IncludeBlobDataset struct { - Snapshots bool - Metadata bool - UncommittedBlobs bool - Copy bool -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include *IncludeBlobDataset - MaxResults uint - Timeout uint - RequestID string -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != nil { - include := []string{} - include = addString(include, p.Include.Snapshots, "snapshots") - include = addString(include, p.Include.Metadata, "metadata") - include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") - include = addString(include, p.Include.Copy, "copy") - fullInclude := strings.Join(include, ",") - out.Set("include", fullInclude) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func addString(datasets []string, include bool, text string) []string { - if include { - datasets = append(datasets, text) - } - return datasets -} - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessPolicy represents each access policy in the container ACL. -type ContainerAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions represents the container ACLs. -type ContainerPermissions struct { - AccessType ContainerAccessType - AccessPolicies []ContainerAccessPolicy -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// GetBlobReference returns a Blob object for the specified blob name. -func (c *Container) GetBlobReference(name string) *Blob { - return &Blob{ - Container: c, - Name: name, - } -} - -// CreateContainerOptions includes the options for a create container operation -type CreateContainerOptions struct { - Timeout uint - Access ContainerAccessType `header:"x-ms-blob-public-access"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Create creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container -func (c *Container) Create(options *CreateContainerOptions) error { - resp, err := c.create(options) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// CreateIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. 
-func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { - resp, err := c.create(options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { - return resp.StatusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) -} - -// Exists returns true if a container with given name exists -// on the storage account, otherwise returns false. -func (c *Container) Exists() (bool, error) { - q := url.Values{"restype": {"container"}} - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - - } else { - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - headers := c.bsc.client.getStandardHeaders() - - resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetContainerPermissionOptions includes options for a set container permissions operation -type SetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetPermissions sets up container permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL -func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { - body, length, err := generateContainerACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetContainerPermissionOptions includes options for a get container permissions operation -type GetContainerPermissionOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will 
only be passed to Azure if populated -func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildAccessPolicy(ap, &resp.Header), nil -} - -func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { - // containerAccess. Blob, Container, empty - containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - permissions := ContainerPermissions{ - AccessType: ContainerAccessType(containerAccess), - AccessPolicies: []ContainerAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - capd := ContainerAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") - capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - permissions.AccessPolicies = append(permissions.AccessPolicies, capd) - } - return &permissions -} - -// DeleteContainerOptions includes options for a delete container operation -type DeleteContainerOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - RequestID string `header:"x-ms-client-request-id"` -} - -// Delete deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) Delete(options *DeleteContainerOptions) error { - resp, err := c.delete(options) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// DeleteIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container -func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { - resp, err := c.delete(options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) { - query := url.Values{"restype": {"container"}} - headers := c.bsc.client.getStandardHeaders() - - if options != nil { - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) - - return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs -func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - var uri string - if c.bsc.client.isServiceSASClient() { - q = mergeParams(q, c.sasuri.Query()) - newURI := c.sasuri - newURI.RawQuery = q.Encode() - uri = newURI.String() - } else { - uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) - } - - headers := c.bsc.client.getStandardHeaders() - headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) - - var out BlobListResponse - resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - for i := range out.Blobs { - out.Blobs[i].Container = c - } - return out, err -} - -// ContainerMetadataOptions includes options for container metadata operations -type ContainerMetadataOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// SetMetadata replaces the metadata for the specified container. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata -func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { - params := url.Values{ - "comp": {"metadata"}, - "restype": {"container"}, - } - headers := c.bsc.client.getStandardHeaders() - headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) - - resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusOK}) -} - -// GetMetadata returns all user-defined metadata for the specified container. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) 
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
-func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
-	params := url.Values{
-		"comp":    {"metadata"},
-		"restype": {"container"},
-	}
-	headers := c.bsc.client.getStandardHeaders()
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = mergeHeaders(headers, headersFromStruct(*options))
-	}
-
-	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
-	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
-	if err != nil {
-		return err
-	}
-	defer drainRespBody(resp)
-	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
-		return err
-	}
-
-	c.writeMetadata(resp.Header)
-	return nil
-}
-
-func (c *Container) writeMetadata(h http.Header) {
-	c.Metadata = writeMetadata(h)
-}
-
-func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
-	sil := SignedIdentifiers{
-		SignedIdentifiers: []SignedIdentifier{},
-	}
-	for _, capd := range policies {
-		permission := capd.generateContainerPermissions()
-		signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
-		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
-	}
-	return xmlMarshal(sil)
-}
-
-func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
-	// generate the permissions string (rwd).
-	// still want the end user API to have bool flags.
-	permissions = ""
-
-	if capd.CanRead {
-		permissions += "r"
-	}
-
-	if capd.CanWrite {
-		permissions += "w"
-	}
-
-	if capd.CanDelete {
-		permissions += "d"
-	}
-
-	return permissions
-}
-
-// GetProperties retrieves the properties of the container and updates the
-// local Container object with them.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
-func (c *Container) GetProperties() error {
-	params := url.Values{
-		"restype": {"container"},
-	}
-	headers := c.bsc.client.getStandardHeaders()
-
-	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
-	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
-		return err
-	}
-
-	// update properties
-	c.Properties.Etag = resp.Header.Get(headerEtag)
-	c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
-	c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
-	c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
-	c.Properties.LastModified = resp.Header.Get("Last-Modified")
-	c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
-
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
deleted file mode 100644
index 151e9a510..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// CopyOptions includes the options for a copy blob operation -type CopyOptions struct { - Timeout uint - Source CopyOptionsConditions - Destiny CopyOptionsConditions - RequestID string -} - -// IncrementalCopyOptions includes the options for an incremental copy blob operation -type IncrementalCopyOptions struct { - Timeout uint - Destination IncrementalCopyOptionsConditions - RequestID string -} - -// CopyOptionsConditions includes some conditional options in a copy blob operation -type CopyOptionsConditions struct { - LeaseID string - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation -type IncrementalCopyOptionsConditions struct { - IfModifiedSince *time.Time - IfUnmodifiedSince *time.Time - IfMatch string - IfNoneMatch string -} - -// Copy starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob -func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { - copyID, err := b.StartCopy(sourceBlob, options) - if err != nil { - return err - } - - return b.WaitForCopy(copyID) -} - -// StartCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using the GetURL method.) 
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
-func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
-	params := url.Values{}
-	headers := b.Container.bsc.client.getStandardHeaders()
-	headers["x-ms-copy-source"] = sourceBlob
-	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
-		// source conditions
-		headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
-		headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
-		headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
-		headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
-		headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
-		// destination conditions (the Destiny field)
-		headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
-		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
-		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
-		headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
-		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
-	}
-	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-	if err != nil {
-		return "", err
-	}
-	defer drainRespBody(resp)
-
-	if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
-		return "", err
-	}
-
-	copyID := resp.Header.Get("x-ms-copy-id")
-	if copyID == "" {
-		return "", errors.New("Got empty copy id header")
-	}
-	return copyID, nil
}
-
-// AbortCopyOptions includes the options for an abort blob operation
-type AbortCopyOptions struct {
-	Timeout   uint
-	LeaseID   string `header:"x-ms-lease-id"`
-	RequestID string `header:"x-ms-client-request-id"`
-}
-
-// AbortCopy aborts a blob copy which has already been triggered by the StartCopy function.
-// copyID is the copy ID returned by StartCopy.
-// A lease ID (options.LeaseID) is required if the destination blob has an active lease on it.
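-//
-// A minimal sketch (hedged; b and copyID as in the StartCopy example above):
-//
-//	if err := b.AbortCopy(copyID, nil); err != nil {
-//		return err
-//	}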
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
-func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
-	params := url.Values{
-		"comp":   {"copy"},
-		"copyid": {copyID},
-	}
-	headers := b.Container.bsc.client.getStandardHeaders()
-	headers["x-ms-copy-action"] = "abort"
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = mergeHeaders(headers, headersFromStruct(*options))
-	}
-	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-	if err != nil {
-		return err
-	}
-	defer drainRespBody(resp)
-	return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
-func (b *Blob) WaitForCopy(copyID string) error {
-	for {
-		err := b.GetProperties(nil)
-		if err != nil {
-			return err
-		}
-
-		if b.Properties.CopyID != copyID {
-			return errBlobCopyIDMismatch
-		}
-
-		switch b.Properties.CopyStatus {
-		case blobCopyStatusSuccess:
-			return nil
-		case blobCopyStatusPending:
-			continue
-		case blobCopyStatusAborted:
-			return errBlobCopyAborted
-		case blobCopyStatusFailed:
-			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
-		default:
-			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
-		}
-	}
-}
-
-// IncrementalCopyBlob copies a snapshot of the source blob to this blob.
-// The sourceBlobURL parameter must be a valid snapshot URL of the original blob.
-// The original blob must be public, or the source URL must include a Shared Access Signature.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
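-//
-// A minimal sketch (hedged; assumes dst is the destination *Blob, and
-// snapURL/snapTime identify an existing snapshot of the source blob):
-//
-//	copyID, err := dst.IncrementalCopyBlob(snapURL, snapTime, nil)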
-func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
-	params := url.Values{"comp": {"incrementalcopy"}}
-
-	// need formatting to 7 decimal places so it's friendly to Windows and *nix
-	snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
-	u, err := url.Parse(sourceBlobURL)
-	if err != nil {
-		return "", err
-	}
-	query := u.Query()
-	query.Add("snapshot", snapshotTimeFormatted)
-	encodedQuery := query.Encode()
-	encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
-	u.RawQuery = encodedQuery
-	snapshotURL := u.String()
-
-	headers := b.Container.bsc.client.getStandardHeaders()
-	headers["x-ms-copy-source"] = snapshotURL
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
-		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
-		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
-		headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
-		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
-	}
-
-	// get URI of destination blob
-	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-	if err != nil {
-		return "", err
-	}
-	defer drainRespBody(resp)
-
-	if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
-		return "", err
-	}
-
-	copyID := resp.Header.Get("x-ms-copy-id")
-	if copyID == "" {
-		return "", errors.New("Got empty copy id header")
-	}
-	return copyID, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
deleted file mode 100644
index 2e805e7df..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"encoding/xml"
-	"net/http"
-	"net/url"
-	"sync"
-)
-
-// Directory represents a directory on a share.
-type Directory struct {
-	fsc        *FileServiceClient
-	Metadata   map[string]string
-	Name       string `xml:"Name"`
-	parent     *Directory
-	Properties DirectoryProperties
-	share      *Share
-}
-
-// DirectoryProperties contains various properties of a directory.
-type DirectoryProperties struct {
-	LastModified string `xml:"Last-Modified"`
-	Etag         string `xml:"Etag"`
-}
-
-// ListDirsAndFilesParameters defines the set of customizable parameters to
-// make a List Files and Directories call.
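-//
-// A hedged paging sketch (assumes an existing *Directory d; the
-// ListDirsAndFiles method is declared further below):
-//
-//	params := ListDirsAndFilesParameters{MaxResults: 100}
-//	for {
-//		resp, err := d.ListDirsAndFiles(params)
-//		if err != nil {
-//			return err
-//		}
-//		// ... consume resp.Directories and resp.Files ...
-//		if resp.NextMarker == "" {
-//			break
-//		}
-//		params.Marker = resp.NextMarker
-//	}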
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type ListDirsAndFilesParameters struct {
-	Prefix     string
-	Marker     string
-	MaxResults uint
-	Timeout    uint
-}
-
-// DirsAndFilesListResponse contains the response fields from
-// a List Files and Directories call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type DirsAndFilesListResponse struct {
-	XMLName     xml.Name    `xml:"EnumerationResults"`
-	Xmlns       string      `xml:"xmlns,attr"`
-	Marker      string      `xml:"Marker"`
-	MaxResults  int64       `xml:"MaxResults"`
-	Directories []Directory `xml:"Entries>Directory"`
-	Files       []File      `xml:"Entries>File"`
-	NextMarker  string      `xml:"NextMarker"`
-}
-
-// builds the complete directory path for this directory object.
-func (d *Directory) buildPath() string {
-	path := ""
-	current := d
-	for current.Name != "" {
-		path = "/" + current.Name + path
-		current = current.parent
-	}
-	return d.share.buildPath() + path
-}
-
-// Create this directory in the associated share.
-// If a directory with the same name already exists, the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) Create(options *FileRequestOptions) error {
-	// if this is the root directory exit early
-	if d.parent == nil {
-		return nil
-	}
-
-	params := prepareOptions(options)
-	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
-	if err != nil {
-		return err
-	}
-
-	d.updateEtagAndLastModified(headers)
-	return nil
-}
-
-// CreateIfNotExists creates this directory under the associated share if the
-// directory does not exist. Returns true if the directory is newly created or
-// false if the directory already exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
-	// if this is the root directory exit early
-	if d.parent == nil {
-		return false, nil
-	}
-
-	params := prepareOptions(options)
-	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
-	if resp != nil {
-		defer drainRespBody(resp)
-		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
-			if resp.StatusCode == http.StatusCreated {
-				d.updateEtagAndLastModified(resp.Header)
-				return true, nil
-			}
-
-			return false, d.FetchAttributes(nil)
-		}
-	}
-
-	return false, err
-}
-
-// Delete removes this directory. It must be empty in order to be deleted.
-// If the directory does not exist the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
-func (d *Directory) Delete(options *FileRequestOptions) error {
-	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
-}
-
-// DeleteIfExists removes this directory if it exists.
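-//
-// A minimal sketch (hedged; assumes an existing *Directory d):
-//
-//	deleted, err := d.DeleteIfExists(nil)
-//	if err == nil && !deleted {
-//		// the directory did not exist in the first place
-//	}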
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory -func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this directory exists. -func (d *Directory) Exists() (bool, error) { - exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) - if exists { - d.updateEtagAndLastModified(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata for this directory. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties -func (d *Directory) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - d.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetDirectoryReference returns a child Directory object for this directory. -func (d *Directory) GetDirectoryReference(name string) *Directory { - return &Directory{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - } -} - -// GetFileReference returns a child File object for this directory. -func (d *Directory) GetFileReference(name string) *File { - return &File{ - fsc: d.fsc, - Name: name, - parent: d, - share: d.share, - mutex: &sync.Mutex{}, - } -} - -// ListDirsAndFiles returns a list of files and directories under this directory. -// It also contains a pagination token and other response details. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files -func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - resp, err := d.fsc.listContent(d.buildPath(), q, nil) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - var out DirsAndFilesListResponse - err = xmlUnmarshal(resp.Body, &out) - return &out, err -} - -// SetMetadata replaces the metadata for this directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata -func (d *Directory) SetMetadata(options *FileRequestOptions) error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) - if err != nil { - return err - } - - d.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (d *Directory) updateEtagAndLastModified(headers http.Header) { - d.Properties.Etag = headers.Get("Etag") - d.Properties.LastModified = headers.Get("Last-Modified") -} - -// URL gets the canonical URL to this directory. 
-// This method does not create a publicly accessible URL if the directory -// is private and this method does not check if the directory exists. -func (d *Directory) URL() string { - return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go deleted file mode 100644 index fbbcb93ba..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ /dev/null @@ -1,456 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/satori/go.uuid" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - etagErrorTemplate = "Etag didn't match: %v" -) - -var ( - errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") - errNilPreviousResult = errors.New("The previous results page is nil") - errNilNextLink = errors.New("There are no more pages in this query results") -) - -// Entity represents an entity inside an Azure table. -type Entity struct { - Table *Table - PartitionKey string - RowKey string - TimeStamp time.Time - OdataMetadata string - OdataType string - OdataID string - OdataEtag string - OdataEditLink string - Properties map[string]interface{} -} - -// GetEntityReference returns an Entity object with the specified -// partition key and row key. -func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { - return &Entity{ - PartitionKey: partitionKey, - RowKey: rowKey, - Table: t, - } -} - -// EntityOptions includes options for entity operations. -type EntityOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetEntityOptions includes options for a get entity operation -type GetEntityOptions struct { - Select []string - RequestID string `header:"x-ms-client-request-id"` -} - -// Get gets the referenced entity. Which properties to get can be -// specified using the select option. 
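-//
-// A minimal sketch (hedged; assumes an existing *Table tbl and this package's
-// FullMetadata MetadataLevel constant):
-//
-//	e := tbl.GetEntityReference("mypartition", "myrow")
-//	opts := &GetEntityOptions{Select: []string{"Name", "Age"}}
-//	err := e.Get(30, FullMetadata, opts)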
-// See: -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities -func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { - if ml == EmptyPayload { - return errEmptyPayload - } - // RowKey and PartitionKey could be lost if not included in the query - // As those are the entity identifiers, it is best if they are not lost - rk := e.RowKey - pk := e.PartitionKey - - query := url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - } - headers := e.Table.tsc.client.getStandardHeaders() - headers[headerAccept] = string(ml) - - if options != nil { - if len(options.Select) > 0 { - query.Add("$select", strings.Join(options.Select, ",")) - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - err = json.Unmarshal(respBody, e) - if err != nil { - return err - } - e.PartitionKey = pk - e.RowKey = rk - - return nil -} - -// Insert inserts the referenced entity in its table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -// ml determines the level of detail of metadata in the operation response, -// or no data at all. -// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity -func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, ml) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) - resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if ml != EmptyPayload { - if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return err - } - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if err = e.UnmarshalJSON(data); err != nil { - return err - } - } else { - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - } - - return nil -} - -// Update updates the contents of an entity. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or if the ETag is different -// than the one in Azure. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 -func (e *Entity) Update(force bool, options *EntityOptions) error { - return e.updateMerge(force, http.MethodPut, options) -} - -// Merge merges the contents of entity specified with PartitionKey and RowKey -// with the content specified in Properties. -// The function fails if there is no entity with the same PartitionKey and -// RowKey in the table or if the ETag is different than the one in Azure. 
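-//
-// A minimal sketch of the insert-then-merge flow (hedged; assumes an existing
-// *Table tbl):
-//
-//	e := tbl.GetEntityReference("mypartition", "myrow")
-//	e.Properties = map[string]interface{}{"Count": int64(1)}
-//	if err := e.Insert(EmptyPayload, nil); err != nil {
-//		return err
-//	}
-//	e.Properties["Count"] = int64(2)
-//	err := e.Merge(false, nil)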
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
-func (e *Entity) Merge(force bool, options *EntityOptions) error {
-	return e.updateMerge(force, "MERGE", options)
-}
-
-// Delete deletes the entity.
-// The function fails if there is no entity with the same PartitionKey and
-// RowKey in the table or if the ETag is different than the one in Azure.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
-func (e *Entity) Delete(force bool, options *EntityOptions) error {
-	query, headers := options.getParameters()
-	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
-
-	headers = addIfMatchHeader(headers, force, e.OdataEtag)
-	headers = addReturnContentHeaders(headers, EmptyPayload)
-
-	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
-	resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
-	if err != nil {
-		// resp can be nil on transport errors, so guard before inspecting it
-		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
-			return fmt.Errorf(etagErrorTemplate, err)
-		}
-		return err
-	}
-	defer drainRespBody(resp)
-
-	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
-		return err
-	}
-
-	return e.updateTimestamp(resp.Header)
-}
-
-// InsertOrReplace inserts an entity or replaces the existing one.
-// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
-func (e *Entity) InsertOrReplace(options *EntityOptions) error {
-	return e.insertOr(http.MethodPut, options)
-}
-
-// InsertOrMerge inserts an entity or merges the existing one.
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
-func (e *Entity) InsertOrMerge(options *EntityOptions) error {
-	return e.insertOr("MERGE", options)
-}
-
-func (e *Entity) buildPath() string {
-	return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
-}
-
-// MarshalJSON is a custom marshaller for entity
-func (e *Entity) MarshalJSON() ([]byte, error) {
-	completeMap := map[string]interface{}{}
-	completeMap[partitionKeyNode] = e.PartitionKey
-	completeMap[rowKeyNode] = e.RowKey
-	for k, v := range e.Properties {
-		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
-		switch t := v.(type) {
-		case []byte:
-			completeMap[typeKey] = OdataBinary
-			completeMap[k] = t
-		case time.Time:
-			completeMap[typeKey] = OdataDateTime
-			completeMap[k] = t.Format(time.RFC3339Nano)
-		case uuid.UUID:
-			completeMap[typeKey] = OdataGUID
-			completeMap[k] = t.String()
-		case int64:
-			completeMap[typeKey] = OdataInt64
-			completeMap[k] = fmt.Sprintf("%v", v)
-		default:
-			completeMap[k] = v
-		}
-		if strings.HasSuffix(k, OdataTypeSuffix) {
-			if !(completeMap[k] == OdataBinary ||
-				completeMap[k] == OdataDateTime ||
-				completeMap[k] == OdataGUID ||
-				completeMap[k] == OdataInt64) {
-				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
-			}
-			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
-			if _, ok := completeMap[valueKey]; !ok {
-				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
-			}
-		}
-	}
-	return json.Marshal(completeMap)
-}
-
-// UnmarshalJSON is a custom unmarshaller for entities
-func (e *Entity) UnmarshalJSON(data []byte) error {
-	errorTemplate := "Deserializing error: %v"
-
-	props := map[string]interface{}{}
-	err := json.Unmarshal(data, &props)
-	if err != nil {
-		return err
-	}
-
-	// deserialize metadata
-	e.OdataMetadata =
stringFromMap(props, "odata.metadata") - e.OdataType = stringFromMap(props, "odata.type") - e.OdataID = stringFromMap(props, "odata.id") - e.OdataEtag = stringFromMap(props, "odata.etag") - e.OdataEditLink = stringFromMap(props, "odata.editLink") - e.PartitionKey = stringFromMap(props, partitionKeyNode) - e.RowKey = stringFromMap(props, rowKeyNode) - - // deserialize timestamp - timeStamp, ok := props["Timestamp"] - if ok { - str, ok := timeStamp.(string) - if !ok { - return fmt.Errorf(errorTemplate, "Timestamp casting error") - } - t, err := time.Parse(time.RFC3339Nano, str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - e.TimeStamp = t - } - delete(props, "Timestamp") - delete(props, "Timestamp@odata.type") - - // deserialize entity (user defined fields) - for k, v := range props { - if strings.HasSuffix(k, OdataTypeSuffix) { - valueKey := strings.TrimSuffix(k, OdataTypeSuffix) - str, ok := props[valueKey].(string) - if !ok { - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) - } - switch v { - case OdataBinary: - props[valueKey], err = base64.StdEncoding.DecodeString(str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - case OdataDateTime: - t, err := time.Parse("2006-01-02T15:04:05Z", str) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = t - case OdataGUID: - props[valueKey] = uuid.FromStringOrNil(str) - case OdataInt64: - i, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return fmt.Errorf(errorTemplate, err) - } - props[valueKey] = i - default: - return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) - } - delete(props, k) - } - } - - e.Properties = props - return nil -} - -func getAndDelete(props map[string]interface{}, key string) interface{} { - if value, ok := props[key]; ok { - delete(props, key) - return value - } - return nil -} - -func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { - if force { - h[headerIfMatch] = "*" - } else { - h[headerIfMatch] = etag - } - return h -} - -// updates Etag and timestamp -func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { - e.OdataEtag = headers.Get(headerEtag) - return e.updateTimestamp(headers) -} - -func (e *Entity) updateTimestamp(headers http.Header) error { - str := headers.Get(headerDate) - t, err := time.Parse(time.RFC1123, str) - if err != nil { - return fmt.Errorf("Update timestamp error: %v", err) - } - e.TimeStamp = t - return nil -} - -func (e *Entity) insertOr(verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = addBodyRelatedHeaders(headers, len(body)) - headers = addReturnContentHeaders(headers, EmptyPayload) - - uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) - resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - - return e.updateEtagAndTimestamp(resp.Header) -} - -func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { - query, headers := options.getParameters() - headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) - - body, err := json.Marshal(e) - if err != nil { - return err - } - headers = 
addBodyRelatedHeaders(headers, len(body))
-	headers = addIfMatchHeader(headers, force, e.OdataEtag)
-	headers = addReturnContentHeaders(headers, EmptyPayload)
-
-	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
-	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
-	if err != nil {
-		// resp can be nil on transport errors, so guard before inspecting it
-		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
-			return fmt.Errorf(etagErrorTemplate, err)
-		}
-		return err
-	}
-	defer drainRespBody(resp)
-
-	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
-		return err
-	}
-
-	return e.updateEtagAndTimestamp(resp.Header)
-}
-
-func stringFromMap(props map[string]interface{}, key string) string {
-	value := getAndDelete(props, key)
-	if value != nil {
-		return value.(string)
-	}
-	return ""
-}
-
-func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
-	query := url.Values{}
-	headers := map[string]string{}
-	if options != nil {
-		query = addTimeout(query, options.Timeout)
-		headers = headersFromStruct(*options)
-	}
-	return query, headers
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
deleted file mode 100644
index 06bbe4ba0..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
+++ /dev/null
@@ -1,480 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strconv"
-	"sync"
-)
-
-const fourMB = uint64(4194304)
-const oneTB = uint64(1099511627776)
-
-// Export maximum range and file sizes
-const MaxRangeSize = fourMB
-const MaxFileSize = oneTB
-
-// File represents a file on a share.
-type File struct {
-	fsc                *FileServiceClient
-	Metadata           map[string]string
-	Name               string `xml:"Name"`
-	parent             *Directory
-	Properties         FileProperties `xml:"Properties"`
-	share              *Share
-	FileCopyProperties FileCopyState
-	mutex              *sync.Mutex
-}
-
-// FileProperties contains various properties of a file.
-type FileProperties struct {
-	CacheControl string `header:"x-ms-cache-control"`
-	Disposition  string `header:"x-ms-content-disposition"`
-	Encoding     string `header:"x-ms-content-encoding"`
-	Etag         string
-	Language     string `header:"x-ms-content-language"`
-	LastModified string
-	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
-	MD5          string `header:"x-ms-content-md5"`
-	Type         string `header:"x-ms-content-type"`
-}
-
-// FileCopyState contains various properties of a file copy operation.
-type FileCopyState struct {
-	CompletionTime string
-	ID             string `header:"x-ms-copy-id"`
-	Progress       string
-	Source         string
-	Status         string `header:"x-ms-copy-status"`
-	StatusDesc     string
-}
-
-// FileStream contains file data returned from a call to GetFile.
-type FileStream struct {
-	Body       io.ReadCloser
-	ContentMD5 string
-}
-
-// FileRequestOptions will be passed to misc file operations.
-// Currently just Timeout (in seconds) but could expand.
-type FileRequestOptions struct {
-	Timeout uint // timeout duration in seconds.
-}
-
-func prepareOptions(options *FileRequestOptions) url.Values {
-	params := url.Values{}
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-	}
-	return params
-}
-
-// FileRanges contains a list of file range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRanges struct {
-	ContentLength uint64
-	LastModified  string
-	ETag          string
-	FileRanges    []FileRange `xml:"Range"`
-}
-
-// FileRange contains range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRange struct {
-	Start uint64 `xml:"Start"`
-	End   uint64 `xml:"End"`
-}
-
-func (fr FileRange) String() string {
-	return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
-}
-
-// builds the complete file path for this file object
-func (f *File) buildPath() string {
-	return f.parent.buildPath() + "/" + f.Name
-}
-
-// ClearRange releases the specified range of space in a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
-func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
-	var timeout *uint
-	if options != nil {
-		timeout = &options.Timeout
-	}
-	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
-	if err != nil {
-		return err
-	}
-
-	f.updateEtagAndLastModified(headers)
-	return nil
-}
-
-// Create creates a new file or replaces an existing one.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
-func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
-	if maxSize > oneTB {
-		return fmt.Errorf("max file size is 1TB")
-	}
-	params := prepareOptions(options)
-	headers := headersFromStruct(f.Properties)
-	headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
-	headers["x-ms-type"] = "file"
-
-	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
-	if err != nil {
-		return err
-	}
-
-	f.Properties.Length = maxSize
-	f.updateEtagAndLastModified(outputHeaders)
-	return nil
-}
-
-// CopyFile operation copies a file/blob from sourceURL to the path provided.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
-func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
-	extraHeaders := map[string]string{
-		"x-ms-type":        "file",
-		"x-ms-copy-source": sourceURL,
-	}
-	params := prepareOptions(options)
-
-	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
-	if err != nil {
-		return err
-	}
-
-	f.updateEtagAndLastModified(headers)
-	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
-	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
-	return nil
-}
-
-// Delete immediately removes this file from the storage account.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
-func (f *File) Delete(options *FileRequestOptions) error {
-	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
-}
-
-// DeleteIfExists removes this file if it exists.
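-//
-// A minimal lifecycle sketch (hedged; assumes an existing *File f):
-//
-//	if err := f.Create(1024, &FileRequestOptions{Timeout: 30}); err != nil {
-//		return err
-//	}
-//	// ... write ranges, then clean up ...
-//	deleted, err := f.DeleteIfExists(nil)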
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 -func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// GetFileOptions includes options for a get file operation -type GetFileOptions struct { - Timeout uint - GetContentMD5 bool -} - -// DownloadToStream operation downloads the file. -// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { - params := prepareOptions(options) - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - drainRespBody(resp) - return nil, err - } - return resp.Body, nil -} - -// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { - extraHeaders := map[string]string{ - "Range": fileRange.String(), - } - params := url.Values{} - if options != nil { - if options.GetContentMD5 { - if isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - extraHeaders["x-ms-range-get-content-md5"] = "true" - } - params = addTimeout(params, options.Timeout) - } - - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) - if err != nil { - return fs, err - } - - if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - drainRespBody(resp) - return fs, err - } - - fs.Body = resp.Body - if options != nil && options.GetContentMD5 { - fs.ContentMD5 = resp.Header.Get("Content-MD5") - } - return fs, nil -} - -// Exists returns true if this file exists. -func (f *File) Exists() (bool, error) { - exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) - if exists { - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - } - return exists, err -} - -// FetchAttributes updates metadata and properties for this file. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties -func (f *File) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - f.updateProperties(headers) - f.Metadata = getMetadataFromHeaders(headers) - return nil -} - -// returns true if the range is larger than 4MB -func isRangeTooBig(fileRange FileRange) bool { - if fileRange.End-fileRange.Start > fourMB { - return true - } - - return false -} - -// ListRangesOptions includes options for a list file ranges operation -type ListRangesOptions struct { - Timeout uint - ListRange *FileRange -} - -// ListRanges returns the list of valid ranges for this file. 
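-//
-// A minimal sketch (hedged; assumes an existing *File f):
-//
-//	ranges, err := f.ListRanges(&ListRangesOptions{
-//		ListRange: &FileRange{Start: 0, End: 1023},
-//	})
-//	if err == nil {
-//		for _, r := range ranges.FileRanges {
-//			// each r spans bytes r.Start through r.End inclusive
-//		}
-//	}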
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges -func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { - params := url.Values{"comp": {"rangelist"}} - - // add optional range to list - var headers map[string]string - if options != nil { - params = addTimeout(params, options.Timeout) - if options.ListRange != nil { - headers = make(map[string]string) - headers["Range"] = options.ListRange.String() - } - } - - resp, err := f.fsc.listContent(f.buildPath(), params, headers) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - var cl uint64 - cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64) - if err != nil { - ioutil.ReadAll(resp.Body) - return nil, err - } - - var out FileRanges - out.ContentLength = cl - out.ETag = resp.Header.Get("ETag") - out.LastModified = resp.Header.Get("Last-Modified") - - err = xmlUnmarshal(resp.Body, &out) - return &out, err -} - -// modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { - if err := f.fsc.checkForStorageEmulator(); err != nil { - return nil, err - } - if fileRange.End < fileRange.Start { - return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if bytes != nil && isRangeTooBig(fileRange) { - return nil, errors.New("range cannot exceed 4MB in size") - } - - params := url.Values{"comp": {"range"}} - if timeout != nil { - params = addTimeout(params, *timeout) - } - - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) - - // default to clear - write := "clear" - cl := uint64(0) - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (fileRange.End - fileRange.Start) + 1 - } - - extraHeaders := map[string]string{ - "Content-Length": strconv.FormatUint(cl, 10), - "Range": fileRange.String(), - "x-ms-write": write, - } - - if contentMD5 != nil { - extraHeaders["Content-MD5"] = *contentMD5 - } - - headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) - resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) - if err != nil { - return nil, err - } - defer drainRespBody(resp) - return resp.Header, checkRespCode(resp, []int{http.StatusCreated}) -} - -// SetMetadata replaces the metadata for this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetFileMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata -func (f *File) SetMetadata(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties on this file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties -func (f *File) SetProperties(options *FileRequestOptions) error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) - if err != nil { - return err - } - - f.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (f *File) updateEtagAndLastModified(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates file properties from the specified HTTP header -func (f *File) updateProperties(header http.Header) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err == nil { - f.Properties.Length = size - } - - f.updateEtagAndLastModified(header) - f.Properties.CacheControl = header.Get("Cache-Control") - f.Properties.Disposition = header.Get("Content-Disposition") - f.Properties.Encoding = header.Get("Content-Encoding") - f.Properties.Language = header.Get("Content-Language") - f.Properties.MD5 = header.Get("Content-MD5") - f.Properties.Type = header.Get("Content-Type") -} - -// URL gets the canonical URL to this file. -// This method does not create a publicly accessible URL if the file -// is private and this method does not check if the file exists. -func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) -} - -// WriteRangeOptions includes options for a write file range operation -type WriteRangeOptions struct { - Timeout uint - ContentMD5 string -} - -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside -// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with -// a maximum size of 4MB. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - var timeout *uint - var md5 *string - if options != nil { - timeout = &options.Timeout - md5 = &options.ContentMD5 - } - - headers, err := f.modifyRange(bytes, fileRange, timeout, md5) - if err != nil { - return err - } - // it's perfectly legal for multiple go routines to call WriteRange - // on the same *File (e.g. concurrently writing non-overlapping ranges) - // so we must take the file mutex before updating our properties. - f.mutex.Lock() - f.updateEtagAndLastModified(headers) - f.mutex.Unlock() - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go deleted file mode 100644 index 1db8e7da6..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ /dev/null @@ -1,338 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" -) - -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client - auth authentication -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// ShareListResponse contains the response fields from -// ListShares call. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - if p.Timeout != 0 { - out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) - } - out = addTimeout(out, p.Timeout) - - return out -} - -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// GetShareReference returns a Share object for the specified share name. -func (f *FileServiceClient) GetShareReference(name string) *Share { - return &Share{ - fsc: f, - Name: name, - Properties: ShareProperties{ - Quota: -1, - }, - } -} - -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. 
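-//
-// A hedged paging sketch (assumes an existing FileServiceClient fsc):
-//
-//	params := ListSharesParameters{MaxResults: 50}
-//	for {
-//		resp, err := fsc.ListShares(params)
-//		if err != nil {
-//			return err
-//		}
-//		// ... consume resp.Shares ...
-//		if resp.NextMarker == "" {
-//			break
-//		}
-//		params.Marker = resp.NextMarker
-//	}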
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares -func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - err = xmlUnmarshal(resp.Body, &out) - - // assign our client to the newly created Share objects - for i := range out.Shares { - out.Shares[i].fsc = &f - } - return &out, err -} - -// GetServiceProperties gets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties -func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return f.client.getServiceProperties(fileServiceName, f.auth) -} - -// SetServiceProperties sets the properties of your storage account's file service. -// File service does not support logging -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties -func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { - return f.client.setServiceProperties(props, fileServiceName, f.auth) -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - drainRespBody(resp) - return nil, err - } - - return resp, nil -} - -// returns true if the specified resource exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, nil, err - } - - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, resp.Header, nil - } - } - return false, nil, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { - resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) - if err != nil { - return nil, err - } - defer drainRespBody(resp) - return resp.Header, checkRespCode(resp, expectedResponseCodes) -} - -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := 
getURLInitValues(compNone, res) - combinedParams := mergeParams(values, urlParams) - uri := f.client.getEndpoint(fileServiceName, path, combinedParams) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) -} - -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) - if err != nil { - return nil, err - } - defer drainRespBody(resp) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - return resp.Header, nil -} - -// gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params = mergeParams(params, getURLInitValues(comp, res)) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil, f.auth) -} - -// deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { - resp, err := f.deleteResourceNoClose(path, res, options) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusAccepted}) -} - -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) -} - -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders -} - -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) - uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) - if err != nil { - return nil, err - } - defer drainRespBody(resp) - - return resp.Header, checkRespCode(resp, []int{http.StatusOK}) -} - -//checkForStorageEmulator determines if the client is setup for 
use with
-// Azure Storage Emulator, and returns a relevant error
-func (f FileServiceClient) checkForStorageEmulator() error {
-	if f.client.accountName == StorageEmulatorAccountName {
-		return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
deleted file mode 100644
index 5b4a65145..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package storage
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"errors"
-	"net/http"
-	"net/url"
-	"strconv"
-	"time"
-)
-
-// lease constants.
-const (
-	leaseHeaderPrefix = "x-ms-lease-"
-	headerLeaseID     = "x-ms-lease-id"
-	leaseAction       = "x-ms-lease-action"
-	leaseBreakPeriod  = "x-ms-lease-break-period"
-	leaseDuration     = "x-ms-lease-duration"
-	leaseProposedID   = "x-ms-proposed-lease-id"
-	leaseTime         = "x-ms-lease-time"
-
-	acquireLease = "acquire"
-	renewLease   = "renew"
-	changeLease  = "change"
-	releaseLease = "release"
-	breakLease   = "break"
-)
-
-// leaseCommonPut is common PUT code for the various acquire/release/break etc functions.
-func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
-	params := url.Values{"comp": {"lease"}}
-
-	if options != nil {
-		params = addTimeout(params, options.Timeout)
-		headers = mergeHeaders(headers, headersFromStruct(*options))
-	}
-	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
-	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
-	if err != nil {
-		return nil, err
-	}
-	defer drainRespBody(resp)
-
-	if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
-		return nil, err
-	}
-
-	return resp.Header, nil
-}
-
-// LeaseOptions includes options for all operations regarding leasing blobs
-type LeaseOptions struct {
-	Timeout           uint
-	Origin            string     `header:"Origin"`
-	IfMatch           string     `header:"If-Match"`
-	IfNoneMatch       string     `header:"If-None-Match"`
-	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
-	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
-	RequestID         string     `header:"x-ms-client-request-id"`
-}
-
-// AcquireLease creates a lease for a blob
-// returns leaseID acquired
-// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
-// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
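-//
-// A minimal lease lifecycle sketch (hedged; assumes an existing *Blob b):
-//
-//	leaseID, err := b.AcquireLease(15, "", nil)
-//	if err != nil {
-//		return err
-//	}
-//	defer b.ReleaseLease(leaseID, nil)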
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds == -1 { - // Do nothing, but don't trigger the following clauses. - } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { - leaseTimeInSeconds = 60 - } else if leaseTimeInSeconds < 15 { - leaseTimeInSeconds = 15 - } - - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// BreakLease breaks the lease for a blob -// Returns the timeout remaining in the lease in seconds -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(headers, options) -} - -// BreakLeaseWithBreakPeriod breaks the lease for a blob -// breakPeriodInSeconds is used to determine how long until new lease can be created. -// Returns the timeout remaining in the lease in seconds -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(headers, options) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) - if err != nil { - return 0, err - } - - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob -// Returns the new LeaseID acquired -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[headerLeaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob -// See 
https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob -func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return err - } - - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { - headers := b.Container.bsc.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(headers, http.StatusOK, options) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go deleted file mode 100644 index ffc183be6..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go +++ /dev/null @@ -1,171 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "time" -) - -// Message represents an Azure message. -type Message struct { - Queue *Queue - Text string `xml:"MessageText"` - ID string `xml:"MessageId"` - Insertion TimeRFC1123 `xml:"InsertionTime"` - Expiration TimeRFC1123 `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - NextVisible TimeRFC1123 `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` -} - -func (m *Message) buildPath() string { - return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) -} - -// PutMessageOptions is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageOptions struct { - Timeout uint - VisibilityTimeout int - MessageTTL int - RequestID string `header:"x-ms-client-request-id"` -} - -// Put operation adds a new message to the back of the message queue. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message -func (m *Message) Put(options *PutMessageOptions) error { - query := url.Values{} - headers := m.Queue.qsc.client.getStandardHeaders() - - req := putMessageRequest{MessageText: m.Text} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(nn) - - if options != nil { - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - if options.MessageTTL != 0 { - query.Set("messagettl", strconv.Itoa(options.MessageTTL)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) - resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - err = checkRespCode(resp, []int{http.StatusCreated}) - if err != nil { - return err - } - err = xmlUnmarshal(resp.Body, m) - if err != nil { - return err - } - return nil -} - -// UpdateMessageOptions is the set of options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. -type UpdateMessageOptions struct { - Timeout uint - VisibilityTimeout int - RequestID string `header:"x-ms-client-request-id"` -} - -// Update operation updates the specified message. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message -func (m *Message) Update(options *UpdateMessageOptions) error { - query := url.Values{} - if m.PopReceipt != "" { - query.Set("popreceipt", m.PopReceipt) - } - - headers := m.Queue.qsc.client.getStandardHeaders() - req := putMessageRequest{MessageText: m.Text} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(nn) - // visibilitytimeout is required for Update (zero or greater) so set the default here - query.Set("visibilitytimeout", "0") - if options != nil { - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) - - resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - m.PopReceipt = resp.Header.Get("x-ms-popreceipt") - nextTimeStr := resp.Header.Get("x-ms-time-next-visible") - if nextTimeStr != "" { - nextTime, err := time.Parse(time.RFC1123, nextTimeStr) - if err != nil { - return err - } - m.NextVisible = TimeRFC1123(nextTime) - } - - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// Delete operation deletes the specified message. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (m *Message) Delete(options *QueueServiceOptions) error { - params := url.Values{"popreceipt": {m.PopReceipt}} - headers := m.Queue.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) - - resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go deleted file mode 100644 index 800adf129..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go +++ /dev/null @@ -1,47 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// MetadataLevel determines if operations should return a paylod, -// and it level of detail. -type MetadataLevel string - -// This consts are meant to help with Odata supported operations -const ( - OdataTypeSuffix = "@odata.type" - - // Types - - OdataBinary = "Edm.Binary" - OdataDateTime = "Edm.DateTime" - OdataGUID = "Edm.Guid" - OdataInt64 = "Edm.Int64" - - // Query options - - OdataFilter = "$filter" - OdataOrderBy = "$orderby" - OdataTop = "$top" - OdataSkip = "$skip" - OdataCount = "$count" - OdataExpand = "$expand" - OdataSelect = "$select" - OdataSearch = "$search" - - EmptyPayload MetadataLevel = "" - NoMetadata MetadataLevel = "application/json;odata=nometadata" - MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" - FullMetadata MetadataLevel = "application/json;odata=fullmetadata" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go deleted file mode 100644 index 7ffd63821..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ /dev/null @@ -1,203 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
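Before pageblob.go's body, a short aside on the queue-message API deleted just above: message.go wires Put, Update, and Delete to POST/PUT/DELETE requests against the queue's /messages path, carrying the XML-serialized message body. A minimal sketch of that lifecycle, assuming the NewBasicClient constructor and GetQueueService accessor from the removed client.go; the account name, key, and queue name are placeholders:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; NewBasicClient and GetQueueService live in
	// the removed client.go and are assumptions of this sketch.
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		panic(err)
	}
	qsc := client.GetQueueService()
	queue := qsc.GetQueueReference("tasks")

	// Put posts the XML-serialized message; MessageTTL and
	// VisibilityTimeout are sent as query parameters only when non-zero.
	msg := queue.GetMessageReference("hello")
	if err := msg.Put(&storage.PutMessageOptions{MessageTTL: 3600}); err != nil {
		panic(err)
	}

	// Put unmarshals the response back into msg, which on recent API
	// versions populates msg.PopReceipt; Update and Delete reuse it.
	msg.Text = "hello, updated"
	if err := msg.Update(&storage.UpdateMessageOptions{VisibilityTimeout: 30}); err != nil {
		panic(err)
	}
	if err := msg.Delete(nil); err != nil {
		panic(err)
	}
	fmt.Println("put, updated and deleted one message")
}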
- -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "time" -) - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// PutPageOptions includes the options for a put page operation -type PutPageOptions struct { - Timeout uint - LeaseID string `header:"x-ms-lease-id"` - IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` - IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` - IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` - IfModifiedSince *time.Time `header:"If-Modified-Since"` - IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` - IfMatch string `header:"If-Match"` - IfNoneMatch string `header:"If-None-Match"` - RequestID string `header:"x-ms-client-request-id"` -} - -// WriteRange writes a range of pages to a page blob. -// Ranges must be aligned with 512-byte boundaries and chunk must be of size -// multiplies by 512. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page -func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { - if bytes == nil { - return errors.New("bytes cannot be nil") - } - return b.modifyRange(blobRange, bytes, options) -} - -// ClearRange clears the given range in a page blob. -// Ranges must be aligned with 512-byte boundaries and chunk must be of size -// multiplies by 512. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page -func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { - return b.modifyRange(blobRange, nil, options) -} - -func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { - if blobRange.End < blobRange.Start { - return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if blobRange.Start%512 != 0 { - return errors.New("the value for rangeStart must be a multiple of 512") - } - if blobRange.End%512 != 511 { - return errors.New("the value for rangeEnd must be a multiple of 512 - 1") - } - - params := url.Values{"comp": {"page"}} - - // default to clear - write := "clear" - var cl uint64 - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (blobRange.End - blobRange.Start) + 1 - } - - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = write - headers["x-ms-range"] = blobRange.String() - headers["Content-Length"] = fmt.Sprintf("%v", cl) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// GetPageRangesOptions includes the options for a get page ranges operation -type GetPageRangesOptions struct { - Timeout uint - Snapshot *time.Time - PreviousSnapshot *time.Time - Range *BlobRange - LeaseID string `header:"x-ms-lease-id"` - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges -func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { - params := url.Values{"comp": {"pagelist"}} - headers := b.Container.bsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - params = addSnapshot(params, options.Snapshot) - if options.PreviousSnapshot != nil { - params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) - } - if options.Range != nil { - headers["Range"] = options.Range.String() - } - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - var out GetPageRangesResponse - resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return out, err - } - defer drainRespBody(resp) - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See CreateBlockBlobFromReader for more info on creating blobs. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob -func (b *Blob) PutPageBlob(options *PutBlobOptions) error { - if b.Properties.ContentLength%512 != 0 { - return errors.New("Content length must be aligned to a 512-byte boundary") - } - - params := url.Values{} - headers := b.Container.bsc.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) - headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) - headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - - resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) - if err != nil { - return err - } - return b.respondCreation(resp, BlobTypePage) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go deleted file mode 100644 index f90050cb0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ /dev/null @@ -1,436 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "time" -) - -const ( - // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" -) - -// QueueAccessPolicy represents each access policy in the queue ACL. -type QueueAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAdd bool - CanUpdate bool - CanProcess bool -} - -// QueuePermissions represents the queue ACLs. -type QueuePermissions struct { - AccessPolicies []QueueAccessPolicy -} - -// SetQueuePermissionOptions includes options for a set queue permissions operation -type SetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Queue represents an Azure queue. -type Queue struct { - qsc *QueueServiceClient - Name string - Metadata map[string]string - AproxMessageCount uint64 -} - -func (q *Queue) buildPath() string { - return fmt.Sprintf("/%s", q.Name) -} - -func (q *Queue) buildPathMessages() string { - return fmt.Sprintf("%s/messages", q.buildPath()) -} - -// QueueServiceOptions includes options for some queue service operations -type QueueServiceOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// Create operation creates a queue under the given account. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 -func (q *Queue) Create(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusCreated}) -} - -// Delete operation permanently deletes the specified queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 -func (q *Queue) Delete(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// Exists returns true if a queue with given name exists. -func (q *Queue) Exists() (bool, error) { - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) - resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusOK, nil - } - err = getErrorFromResponse(resp) - } - return false, err -} - -// SetMetadata operation sets user-defined metadata on the specified queue. -// Metadata is associated with the queue as name-value pairs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata -func (q *Queue) SetMetadata(options *QueueServiceOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := q.qsc.client.getStandardHeaders() - headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// GetMetadata operation retrieves user-defined metadata and queue -// properties on the specified queue. Metadata is associated with -// the queue as name-values pairs. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata -// -// Because the way Golang's http client (and http.Header in particular) -// canonicalize header names, the returned metadata names would always -// be all lower case. 
-func (q *Queue) GetMetadata(options *QueueServiceOptions) error { - params := url.Values{"comp": {"metadata"}} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader)) - if aproxMessagesStr != "" { - aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64) - if err != nil { - return err - } - q.AproxMessageCount = aproxMessages - } - - q.Metadata = getMetadataFromHeaders(resp.Header) - return nil -} - -// GetMessageReference returns a message object with the specified text. -func (q *Queue) GetMessageReference(text string) *Message { - return &Message{ - Queue: q, - Text: text, - } -} - -// GetMessagesOptions is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesOptions struct { - Timeout uint - NumOfMessages int - VisibilityTimeout int - RequestID string `header:"x-ms-client-request-id"` -} - -type messages struct { - XMLName xml.Name `xml:"QueueMessagesList"` - Messages []Message `xml:"QueueMessage"` -} - -// GetMessages operation retrieves one or more messages from the front of the -// queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages -func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) { - query := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - if options.NumOfMessages != 0 { - query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) - } - if options.VisibilityTimeout != 0 { - query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return []Message{}, err - } - defer resp.Body.Close() - - var out messages - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return []Message{}, err - } - for i := range out.Messages { - out.Messages[i].Queue = q - } - return out.Messages, err -} - -// PeekMessagesOptions is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesOptions struct { - Timeout uint - NumOfMessages int - RequestID string `header:"x-ms-client-request-id"` -} - -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. 
-// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages -func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) { - query := url.Values{"peekonly": {"true"}} // Required for peek operation - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - if options.NumOfMessages != 0 { - query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) - } - query = addTimeout(query, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) - - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return []Message{}, err - } - defer resp.Body.Close() - - var out messages - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return []Message{}, err - } - for i := range out.Messages { - out.Messages[i].Queue = q - } - return out.Messages, err -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages -func (q *Queue) ClearMessages(options *QueueServiceOptions) error { - params := url.Values{} - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) - - resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// SetPermissions sets up queue permissions -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl -func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { - body, length, err := generateQueueACLpayload(permissions.AccessPolicies) - if err != nil { - return err - } - - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, qapd := range policies { - permission := qapd.generateQueuePermissions() - signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { - // generate the permissions string (raup). - // still want the end user API to have bool flags. 
- permissions = "" - - if qapd.CanRead { - permissions += "r" - } - - if qapd.CanAdd { - permissions += "a" - } - - if qapd.CanUpdate { - permissions += "u" - } - - if qapd.CanProcess { - permissions += "p" - } - - return permissions -} - -// GetQueuePermissionOptions includes options for a get queue permissions operation -type GetQueuePermissionOptions struct { - Timeout uint - RequestID string `header:"x-ms-client-request-id"` -} - -// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl -// If timeout is 0 then it will not be passed to Azure -func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) { - params := url.Values{ - "comp": {"acl"}, - } - headers := q.qsc.client.getStandardHeaders() - - if options != nil { - params = addTimeout(params, options.Timeout) - headers = mergeHeaders(headers, headersFromStruct(*options)) - } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return buildQueueAccessPolicy(ap, &resp.Header), nil -} - -func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { - permissions := QueuePermissions{ - AccessPolicies: []QueueAccessPolicy{}, - } - - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - qapd := QueueAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") - qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") - - permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) - } - return &permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go deleted file mode 100644 index 28d9ab937..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go +++ /dev/null @@ -1,146 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// QueueSASOptions are options to construct a blob SAS -// URI. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -type QueueSASOptions struct { - QueueSASPermissions - SASOptions -} - -// QueueSASPermissions includes the available permissions for -// a queue SAS URI. 
-type QueueSASPermissions struct { - Read bool - Add bool - Update bool - Process bool -} - -func (q QueueSASPermissions) buildString() string { - permissions := "" - - if q.Read { - permissions += "r" - } - if q.Add { - permissions += "a" - } - if q.Update { - permissions += "u" - } - if q.Process { - permissions += "p" - } - return permissions -} - -// GetSASURI creates an URL to the specified queue which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas -func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { - canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedStart := "" - if options.Start != (time.Time{}) { - signedStart = options.Start.UTC().Format(time.RFC3339) - } - signedExpiry := options.Expiry.UTC().Format(time.RFC3339) - - protocols := "https,http" - if options.UseHTTPS { - protocols = "https" - } - - permissions := options.QueueSASPermissions.buildString() - stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) - if err != nil { - return "", err - } - - sig := q.qsc.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {q.qsc.client.apiVersion}, - "se": {signedExpiry}, - "sp": {permissions}, - "sig": {sig}, - } - - if q.qsc.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - addQueryParameter(sasParams, "sip", options.IP) - } - - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) - sasURL, err := url.Parse(uri) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/queue" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - signedPermissions, - signedStart, - signedExpiry, - canonicalizedResource, - signedIdentifier, - signedIP, - protocols, - signedVersion), nil - - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go deleted file mode 100644 index 29febe146..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go +++ /dev/null @@ -1,42 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client - auth authentication -} - -// GetServiceProperties gets the properties of your storage account's queue service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties -func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return q.client.getServiceProperties(queueServiceName, q.auth) -} - -// SetServiceProperties sets the properties of your storage account's queue service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties -func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { - return q.client.setServiceProperties(props, queueServiceName, q.auth) -} - -// GetQueueReference returns a Container object for the specified queue name. -func (q *QueueServiceClient) GetQueueReference(name string) *Queue { - return &Queue{ - qsc: q, - Name: name, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go deleted file mode 100644 index cf75a2659..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ /dev/null @@ -1,216 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strconv" -) - -// Share represents an Azure file share. -type Share struct { - fsc *FileServiceClient - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` - Metadata map[string]string -} - -// ShareProperties contains various properties of a share. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota int `xml:"Quota"` -} - -// builds the complete path for this share object. -func (s *Share) buildPath() string { - return fmt.Sprintf("/%s", s.Name) -} - -// Create this share under the associated account. 
-// If a share with the same name already exists, the operation fails. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) Create(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// CreateIfNotExists creates this share under the associated account if -// it does not exist. Returns true if the share is newly created or false if -// the share already exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share -func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - params := prepareOptions(options) - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { - if resp.StatusCode == http.StatusCreated { - s.updateEtagAndLastModified(resp.Header) - return true, nil - } - return false, s.FetchAttributes(nil) - } - } - - return false, err -} - -// Delete marks this share for deletion. The share along with any files -// and directories contained within it are later deleted during garbage -// collection. If the share does not exist the operation fails -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) Delete(options *FileRequestOptions) error { - return s.fsc.deleteResource(s.buildPath(), resourceShare, options) -} - -// DeleteIfExists operation marks this share for deletion if it exists. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share -func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) - if resp != nil { - defer drainRespBody(resp) - if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { - return resp.StatusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// Exists returns true if this share already exists -// on the storage account, otherwise returns false. -func (s *Share) Exists() (bool, error) { - exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) - if exists { - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - } - return exists, err -} - -// FetchAttributes retrieves metadata and properties for this share. 
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties -func (s *Share) FetchAttributes(options *FileRequestOptions) error { - params := prepareOptions(options) - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - s.updateQuota(headers) - s.Metadata = getMetadataFromHeaders(headers) - - return nil -} - -// GetRootDirectoryReference returns a Directory object at the root of this share. -func (s *Share) GetRootDirectoryReference() *Directory { - return &Directory{ - fsc: s.fsc, - share: s, - } -} - -// ServiceClient returns the FileServiceClient associated with this share. -func (s *Share) ServiceClient() *FileServiceClient { - return s.fsc -} - -// SetMetadata replaces the metadata for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata -func (s *Share) SetMetadata(options *FileRequestOptions) error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// SetProperties sets system properties for this share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties -func (s *Share) SetProperties(options *FileRequestOptions) error { - extraheaders := map[string]string{} - if s.Properties.Quota > 0 { - if s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) - } - extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) - } - - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) - if err != nil { - return err - } - - s.updateEtagAndLastModified(headers) - return nil -} - -// updates Etag and last modified date -func (s *Share) updateEtagAndLastModified(headers http.Header) { - s.Properties.Etag = headers.Get("Etag") - s.Properties.LastModified = headers.Get("Last-Modified") -} - -// updates quota value -func (s *Share) updateQuota(headers http.Header) { - quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) - if err == nil { - s.Properties.Quota = quota - } -} - -// URL gets the canonical URL to this share. This method does not create a publicly accessible -// URL if the share is private and this method does not check if the share exists. 
-func (s *Share) URL() string { - return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go deleted file mode 100644 index 056ab398a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ /dev/null @@ -1,61 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. -type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. -func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { - return SignedIdentifier{ - ID: id, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: startTime.UTC().Round(time.Second), - ExpiryTime: expiryTime.UTC().Round(time.Second), - Permission: permissions, - }, - } -} - -func updatePermissions(permissions, permission string) bool { - return strings.Contains(permissions, permission) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go deleted file mode 100644 index c338975ab..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
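storagepolicy.go above supplies the ACL plumbing shared by queues and tables: each access policy is converted into a SignedIdentifier XML struct, and the per-service permission string ("raup" for queues) is rebuilt from bool flags on the way out and decoded with updatePermissions on the way back. A hedged sketch of driving it through the queue API shown earlier, with the same placeholder credentials and queue name:

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		panic(err)
	}
	qsc := client.GetQueueService()
	queue := qsc.GetQueueReference("tasks")

	// Each QueueAccessPolicy becomes a <SignedIdentifier>; the bool
	// flags collapse into the "raup" permission string before signing.
	perms := storage.QueuePermissions{
		AccessPolicies: []storage.QueueAccessPolicy{{
			ID:         "readers",
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(48 * time.Hour),
			CanRead:    true,
			CanProcess: true,
		}},
	}
	if err := queue.SetPermissions(perms, nil); err != nil {
		panic(err)
	}

	// GetPermissions parses the ACL XML and decodes the permission
	// string back into the same bool flags.
	got, err := queue.GetPermissions(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("queue now has %d access policies\n", len(got.AccessPolicies))
}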
- -import ( - "net/http" - "net/url" - "strconv" -) - -// ServiceProperties represents the storage account service properties -type ServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors -} - -// Logging represents the Azure Analytics Logging settings -type Logging struct { - Version string - Delete bool - Read bool - Write bool - RetentionPolicy *RetentionPolicy -} - -// RetentionPolicy indicates if retention is enabled and for how many days -type RetentionPolicy struct { - Enabled bool - Days *int -} - -// Metrics provide request statistics. -type Metrics struct { - Version string - Enabled bool - IncludeAPIs *bool - RetentionPolicy *RetentionPolicy -} - -// Cors includes all the CORS rules -type Cors struct { - CorsRule []CorsRule -} - -// CorsRule includes all settings for a Cors rule -type CorsRule struct { - AllowedOrigins string - AllowedMethods string - MaxAgeInSeconds int - ExposedHeaders string - AllowedHeaders string -} - -func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - headers := c.getStandardHeaders() - - resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - var out ServiceProperties - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return nil, err - } - - return &out, nil -} - -func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { - query := url.Values{ - "restype": {"service"}, - "comp": {"properties"}, - } - uri := c.getEndpoint(service, "", query) - - // Ideally, StorageServiceProperties would be the output struct - // This is to avoid golint stuttering, while generating the correct XML - type StorageServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors - } - input := StorageServiceProperties{ - Logging: props.Logging, - HourMetrics: props.HourMetrics, - MinuteMetrics: props.MinuteMetrics, - Cors: props.Cors, - } - - body, length, err := xmlMarshal(input) - if err != nil { - return err - } - - headers := c.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := c.exec(http.MethodPut, uri, headers, body, auth) - if err != nil { - return err - } - defer drainRespBody(resp) - return checkRespCode(resp, []int{http.StatusAccepted}) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go deleted file mode 100644 index 22d9b4f5c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ /dev/null @@ -1,419 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
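storageservice.go above implements the analytics-settings round trip (restype=service&comp=properties) shared by every service client; QueueServiceClient merely forwards to it. A minimal sketch of reading and updating those settings, again assuming NewBasicClient and placeholder credentials:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		panic(err)
	}
	qsc := client.GetQueueService()

	// Fetch the current service properties via the shared GET path.
	props, err := qsc.GetServiceProperties()
	if err != nil {
		panic(err)
	}

	// Enable read/write logging with a 7-day retention policy and write
	// the settings back; the service answers 202 Accepted.
	days := 7
	props.Logging = &storage.Logging{
		Version:         "1.0",
		Read:            true,
		Write:           true,
		RetentionPolicy: &storage.RetentionPolicy{Enabled: true, Days: &days},
	}
	if err := qsc.SetServiceProperties(*props); err != nil {
		panic(err)
	}
	fmt.Println("queue service properties updated")
}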
- -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -const ( - tablesURIPath = "/Tables" - nextTableQueryParameter = "NextTableName" - headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" - headerNextRowKey = "x-ms-continuation-NextRowKey" - nextPartitionKeyQueryParameter = "NextPartitionKey" - nextRowKeyQueryParameter = "NextRowKey" -) - -// TableAccessPolicy are used for SETTING table policies -type TableAccessPolicy struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanAppend bool - CanUpdate bool - CanDelete bool -} - -// Table represents an Azure table. -type Table struct { - tsc *TableServiceClient - Name string `json:"TableName"` - OdataEditLink string `json:"odata.editLink"` - OdataID string `json:"odata.id"` - OdataMetadata string `json:"odata.metadata"` - OdataType string `json:"odata.type"` -} - -// EntityQueryResult contains the response from -// ExecuteQuery and ExecuteQueryNextResults functions. -type EntityQueryResult struct { - OdataMetadata string `json:"odata.metadata"` - Entities []*Entity `json:"value"` - QueryNextLink - table *Table -} - -type continuationToken struct { - NextPartitionKey string - NextRowKey string -} - -func (t *Table) buildPath() string { - return fmt.Sprintf("/%s", t.Name) -} - -func (t *Table) buildSpecificPath() string { - return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) -} - -// Get gets the referenced table. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities -func (t *Table) Get(timeout uint, ml MetadataLevel) error { - if ml == EmptyPayload { - return errEmptyPayload - } - - query := url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - } - headers := t.tsc.client.getStandardHeaders() - headers[headerAccept] = string(ml) - - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return err - } - defer resp.Body.Close() - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return err - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - err = json.Unmarshal(respBody, t) - if err != nil { - return err - } - return nil -} - -// Create creates the referenced table. -// This function fails if the name is not compliant -// with the specification or the tables already exists. -// ml determines the level of detail of metadata in the operation response, -// or no data at all. 
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table -func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ - "timeout": {strconv.FormatUint(uint64(timeout), 10)}, - }) - - type createTableRequest struct { - TableName string `json:"TableName"` - } - req := createTableRequest{TableName: t.Name} - buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(req); err != nil { - return err - } - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, ml) - headers = addBodyRelatedHeaders(headers, buf.Len()) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) - if err != nil { - return err - } - defer resp.Body.Close() - - if ml == EmptyPayload { - if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil { - return err - } - } else { - if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { - return err - } - } - - if ml != EmptyPayload { - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - err = json.Unmarshal(data, t) - if err != nil { - return err - } - } - - return nil -} - -// Delete deletes the referenced table. -// This function fails if the table is not present. -// Be advised: Delete deletes all the entries that may be present. -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table -func (t *Table) Delete(timeout uint, options *TableOptions) error { - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ - "timeout": {strconv.Itoa(int(timeout))}, - }) - - headers := t.tsc.client.getStandardHeaders() - headers = addReturnContentHeaders(headers, EmptyPayload) - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -// QueryOptions includes options for a query entities operation. -// Top, filter and select are OData query options. -type QueryOptions struct { - Top uint - Filter string - Select []string - RequestID string -} - -func (options *QueryOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - if len(options.Select) > 0 { - query.Add(OdataSelect, strings.Join(options.Select, ",")) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryEntities returns the entities in the table. -// You can use query options defined by the OData Protocol specification. 
-// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - query, headers := options.getParameters() - query = addTimeout(query, timeout) - uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) - return t.queryEntities(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryEntities or NextResults operation. -// -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { - if eqr == nil { - return nil, errNilPreviousResult - } - if eqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) -} - -// SetPermissions sets up table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL -func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - body, length, err := generateTableACLPayload(tap) - if err != nil { - return err - } - headers["Content-Length"] = strconv.Itoa(length) - - resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) - if err != nil { - return err - } - defer drainRespBody(resp) - - return checkRespCode(resp, []int{http.StatusNoContent}) -} - -func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { - sil := SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{}, - } - for _, tap := range policies { - permission := generateTablePermissions(&tap) - signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) - sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) - } - return xmlMarshal(sil) -} - -// GetPermissions gets the table ACL permissions -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl -func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { - params := url.Values{"comp": {"acl"}, - "timeout": {strconv.Itoa(int(timeout))}, - } - - uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) - headers := t.tsc.client.getStandardHeaders() - headers = options.addToHeaders(headers) - - resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - var ap AccessPolicy - err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) - if err != nil { - return nil, err - } - return updateTableAccessPolicy(ap), nil -} - -func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { - headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) - if ml != EmptyPayload { - headers[headerAccept] = string(ml) - } - - 
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var entities EntityQueryResult - err = json.Unmarshal(data, &entities) - if err != nil { - return nil, err - } - - for i := range entities.Entities { - entities.Entities[i].Table = t - } - entities.table = t - - contToken := extractContinuationTokenFromHeaders(resp.Header) - if contToken == nil { - entities.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) - v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) - newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) - entities.NextLink = &newURI - entities.ml = ml - } - - return &entities, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { - ct := continuationToken{ - NextPartitionKey: h.Get(headerNextPartitionKey), - NextRowKey: h.Get(headerNextRowKey), - } - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} - -func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - taps := []TableAccessPolicy{} - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - tap := TableAccessPolicy{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") - tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") - tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - taps = append(taps, tap) - } - return taps -} - -func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { - // generate the permissions string (raud). - // still want the end user API to have bool flags. - permissions = "" - - if tap.CanRead { - permissions += "r" - } - - if tap.CanAppend { - permissions += "a" - } - - if tap.CanUpdate { - permissions += "u" - } - - if tap.CanDelete { - permissions += "d" - } - return permissions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go deleted file mode 100644 index a2159e296..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ /dev/null @@ -1,328 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "mime/multipart" - "net/http" - "net/textproto" - "sort" - "strings" - - "github.com/marstr/guid" -) - -// Operation type. Insert, Delete, Replace etc. 
-type Operation int
-
-// consts for batch operations.
-const (
-	InsertOp          = Operation(1)
-	DeleteOp          = Operation(2)
-	ReplaceOp         = Operation(3)
-	MergeOp           = Operation(4)
-	InsertOrReplaceOp = Operation(5)
-	InsertOrMergeOp   = Operation(6)
-)
-
-// BatchEntity is used for tracking entities to operate on and
-// whether operations (replace/merge etc.) should be forced.
-// It wraps a regular Entity with additional data specific to the entity.
-type BatchEntity struct {
-	*Entity
-	Force bool
-	Op    Operation
-}
-
-// TableBatch stores all the entities that will be operated on during a batch process.
-// Entities can be inserted, replaced or deleted.
-type TableBatch struct {
-	BatchEntitySlice []BatchEntity
-
-	// reference to table we're operating on.
-	Table *Table
-}
-
-// defaultChangesetHeaders for changeSets
-var defaultChangesetHeaders = map[string]string{
-	"Accept":       "application/json;odata=minimalmetadata",
-	"Content-Type": "application/json",
-	"Prefer":       "return-no-content",
-}
-
-// NewBatch returns a new TableBatch for populating.
-func (t *Table) NewBatch() *TableBatch {
-	return &TableBatch{
-		Table: t,
-	}
-}
-
-// InsertEntity adds an entity in preparation for a batch insert.
-func (t *TableBatch) InsertEntity(entity *Entity) {
-	be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
-func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
-	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
-func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
-	t.InsertOrReplaceEntity(entity, true)
-}
-
-// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
-func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
-	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
-func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
-	t.InsertOrMergeEntity(entity, true)
-}
-
-// ReplaceEntity adds an entity in preparation for a batch replace.
-func (t *TableBatch) ReplaceEntity(entity *Entity) {
-	be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// DeleteEntity adds an entity in preparation for a batch delete.
-func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
-	be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
-func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
-	t.DeleteEntity(entity, true)
-}
-
-// MergeEntity adds an entity in preparation for a batch merge.
-func (t *TableBatch) MergeEntity(entity *Entity) {
-	be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
-	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// ExecuteBatch executes many table operations in one request to Azure.
-// The operations can be combinations of Insert, Delete, Replace and Merge.
-// Creates the inner changeset body (various operations, Insert, Delete etc.), then creates the outer request packet that encompasses
-// the changesets.
-// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
-func (t *TableBatch) ExecuteBatch() error {
-
-	// Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947).
-	id, err := guid.NewGUIDs(guid.CreationStrategyVersion1)
-	if err != nil {
-		return err
-	}
-
-	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
-	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
-	changesetBody, err := t.generateChangesetBody(changesetBoundary)
-	if err != nil {
-		return err
-	}
-
-	id, err = guid.NewGUIDs(guid.CreationStrategyVersion1)
-	if err != nil {
-		return err
-	}
-
-	boundary := fmt.Sprintf("batch_%s", id.String())
-	body, err := generateBody(changesetBody, changesetBoundary, boundary)
-	if err != nil {
-		return err
-	}
-
-	headers := t.Table.tsc.client.getStandardHeaders()
-	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
-
-	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
-	if err != nil {
-		return err
-	}
-	defer drainRespBody(resp.resp)
-
-	if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
-
-		// check which operation in the batch failed.
-		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
-		requestID, date, version := getDebugHeaders(resp.resp.Header)
-		return AzureStorageServiceError{
-			StatusCode: resp.resp.StatusCode,
-			Code:       resp.odata.Err.Code,
-			RequestID:  requestID,
-			Date:       date,
-			APIVersion: version,
-			Message:    operationFailedMessage,
-		}
-	}
-
-	return nil
-}
-
-// getFailedOperation parses the original Azure error string and determines which operation failed,
-// then generates an appropriate message.
-func (t *TableBatch) getFailedOperation(errorMessage string) string {
-	// errorMessage consists of "number:string"; we just need the number.
-	sp := strings.Split(errorMessage, ":")
-	if len(sp) > 1 {
-		msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
-		return msg
-	}
-
-	// can't parse the message, just return the original message to the client.
-	return errorMessage
-}
-
-// generateBody generates the complete body for the batch request.
-func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
-
-	body := new(bytes.Buffer)
-	writer := multipart.NewWriter(body)
-	writer.SetBoundary(boundary)
-	h := make(textproto.MIMEHeader)
-	h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
-	batchWriter, err := writer.CreatePart(h)
-	if err != nil {
-		return nil, err
-	}
-	batchWriter.Write(changeSetBody.Bytes())
-	writer.Close()
-	return body, nil
-}
-
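-// A minimal batch usage sketch (hypothetical names; assumes table is a
-// *Table and entity is a populated *Entity with PartitionKey/RowKey set):
-//
-//	batch := table.NewBatch()
-//	batch.InsertOrReplaceEntityByForce(entity)
-//	if err := batch.ExecuteBatch(); err != nil {
-//		// the returned AzureStorageServiceError names the failed element
-//	}
-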
-// generateChangesetBody generates the individual changesets for the various operations within the batch request.
-// There is a changeset for Insert, Delete, Merge etc.
-func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
-
-	body := new(bytes.Buffer)
-	writer := multipart.NewWriter(body)
-	writer.SetBoundary(changesetBoundary)
-
-	for _, be := range t.BatchEntitySlice {
-		if err := t.generateEntitySubset(&be, writer); err != nil {
-			return nil, err
-		}
-	}
-
-	writer.Close()
-	return body, nil
-}
-
-// generateVerb generates the HTTP request VERB required for each changeset.
-func generateVerb(op Operation) (string, error) {
-	switch op {
-	case InsertOp:
-		return http.MethodPost, nil
-	case DeleteOp:
-		return http.MethodDelete, nil
-	case ReplaceOp, InsertOrReplaceOp:
-		return http.MethodPut, nil
-	case MergeOp, InsertOrMergeOp:
-		return "MERGE", nil
-	default:
-		return "", errors.New("unable to detect operation")
-	}
-}
-
-// generateQueryPath generates the query path used within the changesets.
-// For inserts it is just the table query path (table name),
-// but for other operations (modifying an existing entity)
-// the partition/row keys need to be generated.
-func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
-	if op == InsertOp {
-		return entity.Table.buildPath()
-	}
-	return entity.buildPath()
-}
-
-// generateGenericOperationHeaders generates common headers for a given operation.
-func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
-	retval := map[string]string{}
-
-	for k, v := range defaultChangesetHeaders {
-		retval[k] = v
-	}
-
-	if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
-		if be.Force || be.Entity.OdataEtag == "" {
-			retval["If-Match"] = "*"
-		} else {
-			retval["If-Match"] = be.Entity.OdataEtag
-		}
-	}
-
-	return retval
-}
-
-// generateEntitySubset generates the body payload for a particular batch entity
-func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {
-
-	h := make(textproto.MIMEHeader)
-	h.Set(headerContentType, "application/http")
-	h.Set(headerContentTransferEncoding, "binary")
-
-	verb, err := generateVerb(batchEntity.Op)
-	if err != nil {
-		return err
-	}
-
-	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
-	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
-	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)
-
-	operationWriter, err := writer.CreatePart(h)
-	if err != nil {
-		return err
-	}
-
-	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
-	operationWriter.Write([]byte(urlAndVerb))
-	writeHeaders(genericOpHeadersMap, &operationWriter)
-	operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.
-
-	// delete operation doesn't need a body.
- if batchEntity.Op != DeleteOp { - //var e Entity = batchEntity.Entity - body, err := json.Marshal(batchEntity.Entity) - if err != nil { - return err - } - operationWriter.Write(body) - } - - return nil -} - -func writeHeaders(h map[string]string, writer *io.Writer) { - // This way it is guaranteed the headers will be written in a sorted order - var keys []string - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go deleted file mode 100644 index 1f063a395..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ /dev/null @@ -1,204 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -const ( - headerAccept = "Accept" - headerEtag = "Etag" - headerPrefer = "Prefer" - headerXmsContinuation = "x-ms-Continuation-NextTableName" -) - -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client - auth authentication -} - -// TableOptions includes options for some table operations -type TableOptions struct { - RequestID string -} - -func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { - if options != nil { - h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) - } - return h -} - -// QueryNextLink includes information for getting the next page of -// results in query operations -type QueryNextLink struct { - NextLink *string - ml MetadataLevel -} - -// GetServiceProperties gets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties -func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return t.client.getServiceProperties(tableServiceName, t.auth) -} - -// SetServiceProperties sets the properties of your storage account's table service. -// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties -func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { - return t.client.setServiceProperties(props, tableServiceName, t.auth) -} - -// GetTableReference returns a Table object for the specified table name. 
-func (t *TableServiceClient) GetTableReference(name string) *Table { - return &Table{ - tsc: t, - Name: name, - } -} - -// QueryTablesOptions includes options for some table operations -type QueryTablesOptions struct { - Top uint - Filter string - RequestID string -} - -func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { - query := url.Values{} - headers := map[string]string{} - if options != nil { - if options.Top > 0 { - query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) - } - if options.Filter != "" { - query.Add(OdataFilter, options.Filter) - } - headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) - } - return query, headers -} - -// QueryTables returns the tables in the storage account. -// You can use query options defined by the OData Protocol specification. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables -func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { - query, headers := options.getParameters() - uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) - return t.queryTables(uri, headers, ml) -} - -// NextResults returns the next page of results -// from a QueryTables or a NextResults operation. -// -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables -// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination -func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { - if tqr == nil { - return nil, errNilPreviousResult - } - if tqr.NextLink == nil { - return nil, errNilNextLink - } - headers := options.addToHeaders(map[string]string{}) - - return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) -} - -// TableQueryResult contains the response from -// QueryTables and QueryTablesNextResults functions. 
-type TableQueryResult struct { - OdataMetadata string `json:"odata.metadata"` - Tables []Table `json:"value"` - QueryNextLink - tsc *TableServiceClient -} - -func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { - if ml == EmptyPayload { - return nil, errEmptyPayload - } - headers = mergeHeaders(headers, t.client.getStandardHeaders()) - headers[headerAccept] = string(ml) - - resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { - return nil, err - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var out TableQueryResult - err = json.Unmarshal(respBody, &out) - if err != nil { - return nil, err - } - - for i := range out.Tables { - out.Tables[i].tsc = t - } - out.tsc = t - - nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation)) - if nextLink == "" { - out.NextLink = nil - } else { - originalURI, err := url.Parse(uri) - if err != nil { - return nil, err - } - v := originalURI.Query() - v.Set(nextTableQueryParameter, nextLink) - newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) - out.NextLink = &newURI - out.ml = ml - } - - return &out, nil -} - -func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { - h[headerContentType] = "application/json" - h[headerContentLength] = fmt.Sprintf("%v", length) - h[headerAcceptCharset] = "UTF-8" - return h -} - -func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { - if ml != EmptyPayload { - h[headerPrefer] = "return-content" - h[headerAccept] = string(ml) - } else { - h[headerPrefer] = "return-no-content" - // From API version 2015-12-11 onwards, Accept header is required - h[headerAccept] = string(NoMetadata) - } - return h -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go deleted file mode 100644 index e8a5dcf8c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ /dev/null @@ -1,244 +0,0 @@ -package storage - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
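-
-// A minimal pagination sketch for the QueryTables/NextResults helpers in
-// tableserviceclient.go above (assumes tsc is a *TableServiceClient; the
-// Top value is arbitrary):
-//
-//	result, err := tsc.QueryTables(MinimalMetadata, &QueryTablesOptions{Top: 100})
-//	for err == nil && result.NextLink != nil {
-//		result, err = result.NextResults(nil)
-//	}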
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/xml"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	fixedTime         = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
-	accountSASOptions = AccountSASTokenOptions{
-		Services: Services{
-			Blob: true,
-		},
-		ResourceTypes: ResourceTypes{
-			Service:   true,
-			Container: true,
-			Object:    true,
-		},
-		Permissions: Permissions{
-			Read:    true,
-			Write:   true,
-			Delete:  true,
-			List:    true,
-			Add:     true,
-			Create:  true,
-			Update:  true,
-			Process: true,
-		},
-		Expiry:   fixedTime,
-		UseHTTPS: true,
-	}
-)
-
-func (c Client) computeHmac256(message string) string {
-	h := hmac.New(sha256.New, c.accountKey)
-	h.Write([]byte(message))
-	return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-func currentTimeRfc1123Formatted() string {
-	return timeRfc1123Formatted(time.Now().UTC())
-}
-
-func timeRfc1123Formatted(t time.Time) string {
-	return t.Format(http.TimeFormat)
-}
-
-func timeRFC3339Formatted(t time.Time) string {
-	return t.Format("2006-01-02T15:04:05.0000000Z")
-}
-
-func mergeParams(v1, v2 url.Values) url.Values {
-	out := url.Values{}
-	for k, v := range v1 {
-		out[k] = v
-	}
-	for k, v := range v2 {
-		vals, ok := out[k]
-		if ok {
-			vals = append(vals, v...)
-			out[k] = vals
-		} else {
-			out[k] = v
-		}
-	}
-	return out
-}
-
-func prepareBlockListRequest(blocks []Block) string {
-	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
-	for _, v := range blocks {
-		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
-	}
-	s += `</BlockList>`
-	return s
-}
-
-func xmlUnmarshal(body io.Reader, v interface{}) error {
-	data, err := ioutil.ReadAll(body)
-	if err != nil {
-		return err
-	}
-	return xml.Unmarshal(data, v)
-}
-
-func xmlMarshal(v interface{}) (io.Reader, int, error) {
-	b, err := xml.Marshal(v)
-	if err != nil {
-		return nil, 0, err
-	}
-	return bytes.NewReader(b), len(b), nil
-}
-
-func headersFromStruct(v interface{}) map[string]string {
-	headers := make(map[string]string)
-	value := reflect.ValueOf(v)
-	for i := 0; i < value.NumField(); i++ {
-		key := value.Type().Field(i).Tag.Get("header")
-		if key != "" {
-			reflectedValue := reflect.Indirect(value.Field(i))
-			var val string
-			if reflectedValue.IsValid() {
-				switch reflectedValue.Type() {
-				case reflect.TypeOf(fixedTime):
-					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
-				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
-					val = strconv.FormatUint(reflectedValue.Uint(), 10)
-				case reflect.TypeOf(int(0)):
-					val = strconv.FormatInt(reflectedValue.Int(), 10)
-				default:
-					val = reflectedValue.String()
-				}
-			}
-			if val != "" {
-				headers[key] = val
-			}
-		}
-	}
-	return headers
-}
-
-// merges extraHeaders into headers and returns headers
-func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
-	for k, v := range extraHeaders {
-		headers[k] = v
-	}
-	return headers
-}
-
-func addToHeaders(h map[string]string, key, value string) map[string]string {
-	if value != "" {
-		h[key] = value
-	}
-	return h
-}
-
-func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
-	if value != nil {
-		h = addToHeaders(h, key, timeRfc1123Formatted(*value))
-	}
-	return h
-}
-
-func addTimeout(params url.Values, timeout uint) url.Values {
-	if timeout > 0 {
-		params.Add("timeout", fmt.Sprintf("%v", timeout))
-	}
-	return params
-}
-
-func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
-	if snapshot != nil {
-		params.Add("snapshot",
timeRFC3339Formatted(*snapshot)) - } - return params -} - -func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { - var out time.Time - var err error - outStr := h.Get(key) - if outStr != "" { - out, err = time.Parse(time.RFC1123, outStr) - if err != nil { - return nil, err - } - } - return &out, nil -} - -// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling -type TimeRFC1123 time.Time - -// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. -func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var value string - d.DecodeElement(&value, &start) - parse, err := time.Parse(time.RFC1123, value) - if err != nil { - return err - } - *t = TimeRFC1123(parse) - return nil -} - -// MarshalXML marshals using time.RFC1123. -func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start) -} - -// returns a map of custom metadata values from the specified HTTP header -func getMetadataFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - if len(metadata) == 0 { - return nil - } - - return metadata -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go deleted file mode 100644 index d4f364abd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package version - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// Number contains the semantic version of this SDK. 
-const Number = "v27.3.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
deleted file mode 100644
index fec416a9c..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
+++ /dev/null
@@ -1,292 +0,0 @@
-# Azure Active Directory authentication for Go
-
-This is a standalone package for authenticating with Azure Active
-Directory from other Go libraries and applications, in particular the [Azure SDK
-for Go](https://github.com/Azure/azure-sdk-for-go).
-
-Note: Despite the package's name it is not related to other "ADAL" libraries
-maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
-should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
-or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
-trackers.
-
-## Install
-
-```bash
-go get -u github.com/Azure/go-autorest/autorest/adal
-```
-
-## Usage
-
-An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
-
-### Register an Azure AD Application with secret
-
-
-1. Register a new application with a `secret` credential
-
-   ```
-   az ad app create \
-    --display-name example-app \
-    --homepage https://example-app/home \
-    --identifier-uris https://example-app/app \
-    --password secret
-   ```
-
-2. Create a service principal using the `Application ID` from the previous step
-
-   ```
-   az ad sp create --id "Application ID"
-   ```
-
-   * Replace `Application ID` with `appId` from step 1.
-
-### Register an Azure AD Application with certificate
-
-1. Create a private key
-
-   ```
-   openssl genrsa -out "example-app.key" 2048
-   ```
-
-2. Create the certificate
-
-   ```
-   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
-   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
-   ```
-
-3. Create the PKCS12 version of the certificate, which also contains the private key
-
-   ```
-   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
-
-   ```
-
-4. Register a new application with the certificate content from `example-app.crt`
-
-   ```
-   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
-
-   az ad app create \
-    --display-name example-app \
-    --homepage https://example-app/home \
-    --identifier-uris https://example-app/app \
-    --key-usage Verify --end-date 2018-01-01 \
-    --key-value "${certificateContents}"
-   ```
-
-5. Create a service principal using the `Application ID` from the previous step
-
-   ```
-   az ad sp create --id "APPLICATION_ID"
-   ```
-
-   * Replace `APPLICATION_ID` with `appId` from step 4.
-
-
-### Grant the necessary permissions
-
-Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
-level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
-which can be assigned to a service principal of an Azure AD application depending on your needs.
-
-```
-az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
-```
-
-* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
-* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file. - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired - return nil -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from the previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - *oauthConfig, - applicationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token() -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from the previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token() -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in the previous section. - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - *oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if err == nil { - token := spt.Token() -} -``` - -#### Username and password authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - *oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if err == nil { - token := spt.Token() -} -``` - -#### Authorization code authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - *oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) - -err = spt.Refresh() -if err == nil { - token := spt.Token() -} -```
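All of the flows above leave the credential in `spt`. A minimal sketch of actually calling an Azure endpoint with it, assuming any of the flows above succeeded (the management URL and api-version below are illustrative only):

```Go
req, err := http.NewRequest(http.MethodGet, "https://management.azure.com/subscriptions?api-version=2020-01-01", nil)
if err != nil {
	return nil, err
}
// EnsureFresh refreshes the token first if it is within the refresh window;
// OAuthToken returns the current access token as a string.
if err := spt.EnsureFresh(); err != nil {
	return nil, err
}
req.Header.Set("Authorization", "Bearer "+spt.OAuthToken())
resp, err := http.DefaultClient.Do(req)
```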
- -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. - -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFX application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oauth token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example: acquire a token for `https://management.core.windows.net/` using the device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index fa5964742..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,151 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" -) - -const ( - activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" ) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '%s' cannot be empty", name) - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs. -// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
-func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} - -// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs. -type MultiTenantOAuthConfig interface { - PrimaryTenant() *OAuthConfig - AuxiliaryTenants() []*OAuthConfig -} - -// OAuthOptions contains optional OAuthConfig creation arguments. -type OAuthOptions struct { - APIVersion string -} - -func (c OAuthOptions) apiVersion() string { - // return the bare version string; NewOAuthConfigWithAPIVersion adds the "?api-version=" prefix - if c.APIVersion != "" { - return c.APIVersion - } - return "1.0" -} - -// NewMultiTenantOAuthConfig creates an object that supports multi-tenant OAuth configuration. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
-func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { - if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { - return nil, errors.New("must specify one to three auxiliary tenants") - } - mtCfg := multiTenantOAuthConfig{ - cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), - } - apiVer := options.apiVersion() - pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) - } - mtCfg.cfgs[0] = pri - for i := range auxiliaryTenantIDs { - // honor the same api-version for auxiliary tenants as for the primary - aux, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, auxiliaryTenantIDs[i], &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) - } - mtCfg.cfgs[i+1] = aux - } - return mtCfg, nil -} - -type multiTenantOAuthConfig struct { - // first config in the slice is the primary tenant - cfgs []*OAuthConfig -} - -func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { - return m.cfgs[0] -} - -func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { - return m.cfgs[1:] -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index b38f4c245..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,242 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
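For reference, a short sketch of what the configuration code above produces, assuming the public Azure AD authority endpoint (the tenant ID is illustrative):

```Go
cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "11111111-2222-3333-4444-555555555555")
if err != nil {
	panic(err)
}
// Endpoints are resolved against the authority using the default api-version of 1.0:
// https://login.microsoftonline.com/11111111-2222-3333-4444-555555555555/oauth2/token?api-version=1.0
fmt.Println(cfg.TokenEndpoint.String())
```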
- -/* - This file is largely based on rjw57/oauth2device's code, with the following differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // stored when the flow is initiated, used during the token exchange - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// deviceToken is the object returned by the token exchange endpoint -//
It can either look like a Token or a TokenError, so put both here -// and check for presence of "Error" to know if we are in an error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has been completed, timed out, or otherwise failed -func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": -
return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod deleted file mode 100644 index 66db8b9e2..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/Azure/go-autorest/autorest/adal - -go 1.12 - -require ( - github.com/Azure/go-autorest/autorest/date v0.1.0 - github.com/Azure/go-autorest/autorest/mocks v0.1.0 - github.com/Azure/go-autorest/tracing v0.5.0 - github.com/dgrijalva/jwt-go v3.2.0+incompatible - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 -) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum deleted file mode 100644 index 9525ff736..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum +++ /dev/null @@ -1,12 +0,0 @@ -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f2751..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that may be accessed by multiple processes. -func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index d7e4372bb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,95 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
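A minimal sketch of the persistence round trip in persist.go, assuming `spt`, `oauthConfig`, `applicationID`, and `resource` from the README above (the cache path is illustrative):

```Go
// Save the current token with owner-only permissions. SaveToken writes to a
// temp file and renames it into place, so concurrent readers never see a partial file.
if err := adal.SaveToken("/home/user/.adal/accessToken.json", 0600, spt.Token()); err != nil {
	return err
}

// Later: restore the token and resume without interactive authentication.
cached, err := adal.LoadToken("/home/user/.adal/accessToken.json")
if err != nil {
	return err
}
spt, err = adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, applicationID, resource, *cached)
```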
- -import ( - "crypto/tls" - "net/http" - "net/http/cookiejar" - "sync" - - "github.com/Azure/go-autorest/tracing" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -var defaultSender Sender -var defaultSenderInit = &sync.Once{} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to -// the Sender. Decorators are applied in the order received, but their effect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -func sender() Sender { - // note that we can't init defaultSender in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenderInit.Do(func() { - // Use behaviour compatible with DefaultTransport, but require a minimum TLS version. - defaultTransport := http.DefaultTransport.(*http.Transport) - transport := &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSender -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index b72753498..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,1080 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
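The SendDecorator hook above is how callers splice middleware into token requests. A small sketch of a logging decorator, assuming nothing beyond the interfaces defined in sender.go (the log line is illustrative):

```Go
logRequests := func(s adal.Sender) adal.Sender {
	return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
		log.Printf("adal request: %s %s", r.Method, r.URL)
		return s.Do(r) // delegate to the wrapped Sender
	})
}

// CreateSender wraps the default TLS 1.2+ client with the decorator;
// SetSender (defined on ServicePrincipalToken in token.go below) installs it.
spt.SetSender(adal.CreateSender(logRequests))
```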
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well-known endpoint for getting MSI authentication tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. -type MultitenantOAuthTokenProvider interface { - PrimaryOAuthToken() string - AuxiliaryOAuthTokens() []string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. -type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality that accepts a context -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests.
-// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -// OAuthToken returns the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret, -// meaning it is not valid for fetching a fresh token. This is used by manually created tokens (see NewServicePrincipalTokenFromManualToken). -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. -func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface.
-func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - // guard the type assertion so malformed input returns an error instead of panicking - secret, ok := raw["secret"].(map[string]interface{}) - if !ok { - return errors.New("field 'secret' is missing or not an object") - } - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - // Don't override the refreshLock or the sender if those have already been set. - if spt.refreshLock == nil { - spt.refreshLock = &sync.RWMutex{} - } - if spt.sender == nil { - spt.sender = sender() - } - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code, client secret, and redirect URI. -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the specified user assigned identity when creating the token. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != nil { - if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - } - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("resource", resource) - v.Set("api-version", "2018-02-01") - if userAssignedID != nil { - v.Set("client_id", *userAssignedID) - } - msiEndpointURL.RawQuery = v.Encode() - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{}, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - if userAssignedID != nil { - spt.inner.ClientID = *userAssignedID - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be synchronized.
-func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be synchronized. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) - if err != nil { - return false - } - return u.Host == imds.Host && u.Path == imds.Path -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - req = req.WithContext(ctx) - if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - } - - if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - req.Method = http.MethodGet - req.Header.Set(metadataHeader, "true") - } - - var resp *http.Response - if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - resp, err = spt.sender.Do(req) - } - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil)
- } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) - } - - // For the following error cases don't return a TokenRefreshError. The operation succeeded - // but some transient failure happened during deserialization. By returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.inner.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - for attempt < maxAttempts { - resp, err = sender.Do(req) - // Retry on temporary network errors, e.g. transient network failures. - // If we don't receive a response then assume we can't connect to the - // endpoint, so we're likely not running on an Azure VM and shouldn't retry. - if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { - return - } - - // Perform exponential backoff with a cap. - // Must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -// isTemporaryNetworkError returns true if the specified error is a temporary network error or false if it's not. -// If the error doesn't implement the net.Error interface the return value is true.
-func isTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// returns true if slice ints contains the value n -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} - -// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. -type MultiTenantServicePrincipalToken struct { - PrimaryToken *ServicePrincipalToken - AuxiliaryTokens []*ServicePrincipalToken -} - -// PrimaryOAuthToken returns the primary authorization token. -func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { - return mt.PrimaryToken.OAuthToken() -} - -// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. -func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { - tokens := make([]string, len(mt.AuxiliaryTokens)) - for i := range mt.AuxiliaryTokens { - tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() - } - return tokens -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. 
-func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. -func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b3484..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. 
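[Editor's note: the multi-tenant helpers above all share one fan-out shape: operate on the primary token first, then each auxiliary token, wrapping the first failure with context. A generic sketch of that pattern; refresher, refreshAll, and fakeToken are hypothetical stand-ins, not adal types:

    package main

    import (
        "context"
        "fmt"
    )

    // refresher is a stand-in for the per-tenant token types above.
    type refresher interface {
        RefreshWithContext(ctx context.Context) error
    }

    // refreshAll applies the same fan-out as the multi-tenant helpers above:
    // primary first, then every auxiliary token, wrapping the first failure.
    func refreshAll(ctx context.Context, primary refresher, aux []refresher) error {
        if err := primary.RefreshWithContext(ctx); err != nil {
            return fmt.Errorf("failed to refresh primary token: %v", err)
        }
        for i, a := range aux {
            if err := a.RefreshWithContext(ctx); err != nil {
                return fmt.Errorf("failed to refresh auxiliary token %d: %v", i, err)
            }
        }
        return nil
    }

    type fakeToken struct{ name string }

    func (t fakeToken) RefreshWithContext(ctx context.Context) error { return nil }

    func main() {
        err := refreshAll(context.Background(), fakeToken{"primary"}, []refresher{fakeToken{"aux1"}})
        fmt.Println(err) // <nil>
    }
]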
-func UserAgent() string {
-	return ua
-}
-
-// AddToUserAgent adds an extension to the current user agent.
-func AddToUserAgent(extension string) error {
-	if extension != "" {
-		ua = fmt.Sprintf("%s %s", ua, extension)
-		return nil
-	}
-	return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
deleted file mode 100644
index 54e87b5b6..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package autorest
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"crypto/tls"
-	"encoding/base64"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/Azure/go-autorest/autorest/adal"
-)
-
-const (
-	bearerChallengeHeader       = "Www-Authenticate"
-	bearer                      = "Bearer"
-	tenantID                    = "tenantID"
-	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
-	bingAPISdkHeader            = "X-BingApis-SDK-Client"
-	golangBingAPISdkHeaderValue = "Go-SDK"
-	authorization               = "Authorization"
-	basic                       = "Basic"
-)
-
-// Authorizer is the interface that provides a PrepareDecorator used to supply request
-// authorization. Most often, the Authorizer decorator runs last so it has access to the full
-// state of the formed HTTP request.
-type Authorizer interface {
-	WithAuthorization() PrepareDecorator
-}
-
-// NullAuthorizer implements a default, "do nothing" Authorizer.
-type NullAuthorizer struct{}
-
-// WithAuthorization returns a PrepareDecorator that does nothing.
-func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
-	return WithNothing()
-}
-
-// APIKeyAuthorizer implements API Key authorization.
-type APIKeyAuthorizer struct {
-	headers         map[string]interface{}
-	queryParameters map[string]interface{}
-}
-
-// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
-func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
-	return NewAPIKeyAuthorizer(headers, nil)
-}
-
-// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
-func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
-	return NewAPIKeyAuthorizer(nil, queryParameters)
-}
-
-// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with headers and query parameters.
-func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
-	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
-func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
-	}
-}
-
-// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
-type CognitiveServicesAuthorizer struct {
-	subscriptionKey string
-}
-
-// NewCognitiveServicesAuthorizer creates a new CognitiveServicesAuthorizer with the given subscription key.
-func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
-	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key header.
-func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
-	headers := make(map[string]interface{})
-	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
-	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
-
-	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
-}
-
-// BearerAuthorizer implements the bearer authorization scheme.
-type BearerAuthorizer struct {
-	tokenProvider adal.OAuthTokenProvider
-}
-
-// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
-func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
-	return &BearerAuthorizer{tokenProvider: tp}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
-// value is "Bearer " followed by the token.
-//
-// By default, the token will be automatically refreshed through the Refresher interface.
-func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err == nil {
-				// the ordering is important here, prefer RefresherWithContext if available
-				if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
-					err = refresher.EnsureFreshWithContext(r.Context())
-				} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
-					err = refresher.EnsureFresh()
-				}
-				if err != nil {
-					var resp *http.Response
-					if tokError, ok := err.(adal.TokenRefreshError); ok {
-						resp = tokError.Response()
-					}
-					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
-						"Failed to refresh the Token for request to %s", r.URL)
-				}
-				return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
-			}
-			return r, err
-		})
-	}
-}
-
-// BearerAuthorizerCallbackFunc is the authentication callback signature.
-type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
-
-// BearerAuthorizerCallback implements bearer authorization via a callback.
-type BearerAuthorizerCallback struct {
-	sender   Sender
-	callback BearerAuthorizerCallbackFunc
-}
-
-// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
-// is invoked when the HTTP request is submitted.
-func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
-	if s == nil {
-		s = sender(tls.RenegotiateNever)
-	}
-	return &BearerAuthorizerCallback{sender: s, callback: callback}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
-// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
-//
-// By default, the token will be automatically refreshed through the Refresher interface.
-func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err == nil {
-				// make a copy of the request and remove the body as it's not
-				// required and avoids us having to create a copy of it.
-				rCopy := *r
-				removeRequestBody(&rCopy)
-
-				resp, err := bacb.sender.Do(&rCopy)
-				if err == nil && resp.StatusCode == 401 {
-					defer resp.Body.Close()
-					if hasBearerChallenge(resp) {
-						bc, err := newBearerChallenge(resp)
-						if err != nil {
-							return r, err
-						}
-						if bacb.callback != nil {
-							ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
-							if err != nil {
-								return r, err
-							}
-							return Prepare(r, ba.WithAuthorization())
-						}
-					}
-				}
-			}
-			return r, err
-		})
-	}
-}
-
-// returns true if the HTTP response contains a bearer challenge
-func hasBearerChallenge(resp *http.Response) bool {
-	authHeader := resp.Header.Get(bearerChallengeHeader)
-	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
-		return false
-	}
-	return true
-}
-
-type bearerChallenge struct {
-	values map[string]string
-}
-
-func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
-	challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
-	trimmedChallenge := challenge[len(bearer)+1:]
-
-	// challenge is a set of key=value pairs that are comma delimited
-	pairs := strings.Split(trimmedChallenge, ",")
-	if len(pairs) < 1 {
-		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
-		return bc, err
-	}
-
-	bc.values = make(map[string]string)
-	for i := range pairs {
-		trimmedPair := strings.TrimSpace(pairs[i])
-		pair := strings.Split(trimmedPair, "=")
-		if len(pair) == 2 {
-			// remove the enclosing quotes
-			key := strings.Trim(pair[0], "\"")
-			value := strings.Trim(pair[1], "\"")
-
-			switch key {
-			case "authorization", "authorization_uri":
-				// strip the tenant ID from the authorization URL
-				asURL, err := url.Parse(value)
-				if err != nil {
-					return bc, err
-				}
-				bc.values[tenantID] = asURL.Path[1:]
-			default:
-				bc.values[key] = value
-			}
-		}
-	}
-
-	return bc, err
-}
-
-// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
-type EventGridKeyAuthorizer struct {
-	topicKey string
-}
-
-// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
-// with the specified topic key.
-func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
-	return EventGridKeyAuthorizer{topicKey: topicKey}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
-func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
-	headers := map[string]interface{}{
-		"aeg-sas-key": egta.topicKey,
-	}
-	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
-}
-
-// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
-// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
-type BasicAuthorizer struct {
-	userName string
-	password string
-}
-
-// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
-func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
-	return &BasicAuthorizer{
-		userName: userName,
-		password: password,
-	}
-}
-
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
-// value is "Basic " followed by the base64-encoded username:password tuple.
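[Editor's note: newBearerChallenge above pulls comma-delimited key="value" pairs out of the Www-Authenticate header. A simplified sketch of the same parsing, assuming only the standard library; parseChallenge and the example URLs are illustrative, and quoting rules are reduced to strings.Trim:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseChallenge splits a Bearer challenge header into key/value pairs,
    // the same comma-and-equals format handled above.
    func parseChallenge(header string) map[string]string {
        values := map[string]string{}
        challenge := strings.TrimPrefix(strings.TrimSpace(header), "Bearer ")
        for _, pair := range strings.Split(challenge, ",") {
            kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
            if len(kv) != 2 {
                continue // skip malformed pairs
            }
            values[strings.Trim(kv[0], `"`)] = strings.Trim(kv[1], `"`)
        }
        return values
    }

    func main() {
        h := `Bearer authorization_uri="https://login.example.com/tenant", resource="https://vault.example.com"`
        for k, v := range parseChallenge(h) {
            fmt.Println(k, "=", v)
        }
    }
]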
-func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
-	headers := make(map[string]interface{})
-	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
-
-	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
-}
-
-// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
-type MultiTenantServicePrincipalTokenAuthorizer interface {
-	WithAuthorization() PrepareDecorator
-}
-
-// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantServicePrincipalTokenAuthorizer using the given token provider.
-func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
-	return &multiTenantSPTAuthorizer{tp: tp}
-}
-
-type multiTenantSPTAuthorizer struct {
-	tp adal.MultitenantOAuthTokenProvider
-}
-
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
-// primary token along with the auxiliary authorization header using the auxiliary tokens.
-//
-// By default, the token will be automatically refreshed through the Refresher interface.
-func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
-	return func(p Preparer) Preparer {
-		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
-			r, err := p.Prepare(r)
-			if err != nil {
-				return r, err
-			}
-			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
-				err = refresher.EnsureFreshWithContext(r.Context())
-				if err != nil {
-					var resp *http.Response
-					if tokError, ok := err.(adal.TokenRefreshError); ok {
-						resp = tokError.Response()
-					}
-					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
-						"Failed to refresh one or more Tokens for request to %s", r.URL)
-				}
-			}
-			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
-			if err != nil {
-				return r, err
-			}
-			auxTokens := mt.tp.AuxiliaryOAuthTokens()
-			for i := range auxTokens {
-				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
-			}
-			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
-		})
-	}
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
deleted file mode 100644
index aafdf021f..000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
-Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
-and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
-generated Go code.
-
-The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
-and Responding. A typical pattern is:
-
-  req, err := Prepare(&http.Request{},
-    token.WithAuthorization())
-
-  resp, err := Send(req,
-    WithLogging(logger),
-    DoErrorIfStatusCode(http.StatusInternalServerError),
-    DoCloseIfError(),
-    DoRetryForAttempts(5, time.Second))
-
-  err = Respond(resp,
-    ByDiscardingBody(),
-    ByClosing())
-
-Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
-and then pass the data along, pass the data first and then modify the result, or wrap themselves
-around passing the data (such as a logger might do). Decorators run in the order provided. For
-example, the following:
-
-  req, err := Prepare(&http.Request{},
-    WithBaseURL("https://microsoft.com/"),
-    WithPath("a"),
-    WithPath("b"),
-    WithPath("c"))
-
-will set the URL to:
-
-  https://microsoft.com/a/b/c
-
-Preparers and Responders may be shared and re-used (assuming the underlying decorators support
-sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
-shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
-all bound together by means of input / output channels.
-
-Decorators hold their passed state within a closure (such as the path components in the example
-above). Be careful to share Preparers and Responders only in a context where such held state
-applies. For example, it may not make sense to share a Preparer that applies a query string from a
-fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
-struct (e.g., ByUnmarshallingJson) is likely incorrect.
-
-Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
-(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
-github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
-correct parsing and formatting.
-
-Errors raised by autorest objects and methods will conform to the autorest.Error interface.
-
-See the included examples for more detail. For details on the suggested use of this package by
-generated clients, see the Client described below.
-*/
-package autorest
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
-	"context"
-	"net/http"
-	"time"
-)
-
-const (
-	// HeaderLocation specifies the HTTP Location header.
-	HeaderLocation = "Location"
-
-	// HeaderRetryAfter specifies the HTTP Retry-After header.
-	HeaderRetryAfter = "Retry-After"
-)
-
-// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
-// and false otherwise.
-func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
-	if resp == nil {
-		return false
-	}
-	return containsInt(codes, resp.StatusCode)
-}
-
-// GetLocation retrieves the URL from the Location header of the passed response.
-func GetLocation(resp *http.Response) string {
-	return resp.Header.Get(HeaderLocation)
-}
-
-// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
-// the header is absent or is malformed, it will return the supplied default delay time.Duration.
-func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
-	retry := resp.Header.Get(HeaderRetryAfter)
-	if retry == "" {
-		return defaultDelay
-	}
-
-	d, err := time.ParseDuration(retry + "s")
-	if err != nil {
-		return defaultDelay
-	}
-
-	return d
-}
-
-// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
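[Editor's note: the decorator chaining described in the package documentation above reduces to a few lines of Go. A sketch of the pattern under hypothetical names (prepareFunc, decorator, withPath); this is not the autorest API, which layers Preparer/PrepareDecorator interfaces on top of the same idea:

    package main

    import (
        "fmt"
        "net/http"
    )

    type prepareFunc func(*http.Request) (*http.Request, error)

    type decorator func(prepareFunc) prepareFunc

    // withPath appends a path segment, then passes the request along.
    func withPath(segment string) decorator {
        return func(next prepareFunc) prepareFunc {
            return func(r *http.Request) (*http.Request, error) {
                r.URL.Path += "/" + segment
                return next(r)
            }
        }
    }

    // prepare applies decorators in reverse so they run in the order provided.
    func prepare(r *http.Request, ds ...decorator) (*http.Request, error) {
        p := func(r *http.Request) (*http.Request, error) { return r, nil }
        for i := len(ds) - 1; i >= 0; i-- {
            p = ds[i](p)
        }
        return p(r)
    }

    func main() {
        req, _ := http.NewRequest(http.MethodGet, "https://microsoft.com", nil)
        req, _ = prepare(req, withPath("a"), withPath("b"), withPath("c"))
        fmt.Println(req.URL.String()) // https://microsoft.com/a/b/c
    }
]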
-func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 1cb41cbeb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,924 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - pt pollingTracker -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. 
-func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// DoneWithContext queries the service to see if the operation has completed. -func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. 
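[Editor's note: GetRetryAfter and GetPollingDelay above both read Retry-After as a number of seconds, falling back to a default when the header is absent or malformed. A compact sketch of that parsing; retryAfter is an illustrative name:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // retryAfter returns the delay from a Retry-After header expressed in
    // seconds, or def when the header is missing or unparsable.
    func retryAfter(resp *http.Response, def time.Duration) time.Duration {
        v := resp.Header.Get("Retry-After")
        if v == "" {
            return def
        }
        d, err := time.ParseDuration(v + "s")
        if err != nil {
            return def
        }
        return d
    }

    func main() {
        resp := &http.Response{Header: http.Header{"Retry-After": []string{"15"}}}
        fmt.Println(retryAfter(resp, 30*time.Second)) // 15s
    }
]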
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
-	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
-	defer func() {
-		sc := -1
-		resp := f.Response()
-		if resp != nil {
-			sc = resp.StatusCode
-		}
-		tracing.EndSpan(ctx, sc, err)
-	}()
-	cancelCtx := ctx
-	// if the provided context already has a deadline don't override it
-	_, hasDeadline := ctx.Deadline()
-	if d := client.PollingDuration; !hasDeadline && d != 0 {
-		var cancel context.CancelFunc
-		cancelCtx, cancel = context.WithTimeout(ctx, d)
-		defer cancel()
-	}
-
-	done, err := f.DoneWithContext(ctx, client)
-	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
-		if attempts >= client.RetryAttempts {
-			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
-		}
-		// we want delayAttempt to be zero in the non-error case so
-		// that DelayForBackoff doesn't perform exponential back-off
-		var delayAttempt int
-		var delay time.Duration
-		if err == nil {
-			// check for Retry-After delay, if not present use the client's polling delay
-			var ok bool
-			delay, ok = f.GetPollingDelay()
-			if !ok {
-				delay = client.PollingDelay
-			}
-		} else {
-			// there was an error polling for status so perform exponential
-			// back-off based on the number of attempts using the client's retry
-			// duration. update attempts after delayAttempt to avoid off-by-one.
-			delayAttempt = attempts
-			delay = client.RetryDuration
-			attempts++
-		}
-		// wait until the delay elapses or the context is cancelled
-		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
-		if !delayElapsed {
-			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
-		}
-	}
-	return
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (f Future) MarshalJSON() ([]byte, error) {
-	return json.Marshal(f.pt)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (f *Future) UnmarshalJSON(data []byte) error {
-	// unmarshal into JSON object to determine the tracker type
-	obj := map[string]interface{}{}
-	err := json.Unmarshal(data, &obj)
-	if err != nil {
-		return err
-	}
-	if obj["method"] == nil {
-		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
-	}
-	method := obj["method"].(string)
-	switch strings.ToUpper(method) {
-	case http.MethodDelete:
-		f.pt = &pollingTrackerDelete{}
-	case http.MethodPatch:
-		f.pt = &pollingTrackerPatch{}
-	case http.MethodPost:
-		f.pt = &pollingTrackerPost{}
-	case http.MethodPut:
-		f.pt = &pollingTrackerPut{}
-	default:
-		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
-	}
-	// now unmarshal into the tracker
-	return json.Unmarshal(data, &f.pt)
-}
-
-// PollingURL returns the URL used for retrieving the status of the long-running operation.
-func (f Future) PollingURL() string {
-	if f.pt == nil {
-		return ""
-	}
-	return f.pt.pollingURL()
-}
-
-// GetResult should be called once polling has completed successfully.
-// It makes the final GET call to retrieve the resultant payload.
-func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
-	if f.pt.finalGetURL() == "" {
-		// we can end up in this situation if the async operation returns a 200
-		// with no polling URLs. in that case return the response which should
-		// contain the JSON payload (only do this for successful terminal cases).
-		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
-			return lr, nil
-		}
-		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
-	}
-	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
-	if err != nil {
-		return nil, err
-	}
-	return sender.Do(req)
-}
-
-type pollingTracker interface {
-	// these methods can differ per tracker
-
-	// checks the response headers and status code to determine the polling mechanism
-	updatePollingMethod() error
-
-	// checks the response for tracker-specific error conditions
-	checkForErrors() error
-
-	// returns true if provisioning state should be checked
-	provisioningStateApplicable() bool
-
-	// methods common to all trackers
-
-	// initializes a tracker's polling URL and method, called for each iteration.
-	// these values can be overridden by each polling tracker as required.
-	initPollingMethod() error
-
-	// initializes the tracker's internal state, call this when the tracker is created
-	initializeState() error
-
-	// makes an HTTP request to check the status of the LRO
-	pollForStatus(ctx context.Context, sender autorest.Sender) error
-
-	// updates internal tracker state, call this after each call to pollForStatus
-	updatePollingState(provStateApl bool) error
-
-	// returns the error response from the service, can be nil
-	pollingError() error
-
-	// returns the polling method being used
-	pollingMethod() PollingMethodType
-
-	// returns the state of the LRO as returned from the service
-	pollingStatus() string
-
-	// returns the URL used for polling status
-	pollingURL() string
-
-	// returns the URL used for the final GET to retrieve the resource
-	finalGetURL() string
-
-	// returns true if the LRO is in a terminal state
-	hasTerminated() bool
-
-	// returns true if the LRO is in a failed terminal state
-	hasFailed() bool
-
-	// returns true if the LRO is in a successful terminal state
-	hasSucceeded() bool
-
-	// returns the cached HTTP response after a call to pollForStatus(), can be nil
-	latestResponse() *http.Response
-}
-
-type pollingTrackerBase struct {
-	// resp is the last response, either from the submission of the LRO or from polling
-	resp *http.Response
-
-	// method is the HTTP verb, this is needed for deserialization
-	Method string `json:"method"`
-
-	// rawBody is the raw JSON response body
-	rawBody map[string]interface{}
-
-	// denotes if polling is using async-operation or location header
-	Pm PollingMethodType `json:"pollingMethod"`
-
-	// the URL to poll for status
-	URI string `json:"pollingURI"`
-
-	// the state of the LRO as returned from the service
-	State string `json:"lroState"`
-
-	// the URL to GET for the final result
-	FinalGetURI string `json:"resultURI"`
-
-	// used to hold an error object returned from the service
-	Err *ServiceError `json:"error,omitempty"`
-}
-
-func (pt *pollingTrackerBase) initializeState() error {
-	// determine the initial polling state based on response body and/or HTTP status
-	// code. this is applicable to the initial LRO response, not polling responses!
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) - req, err = preparer.Prepare(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") - } - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. 
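[Editor's note: updateRawBody above relies on a common trick: drain the response body, then put an in-memory copy back so later readers still see it. A standalone sketch of that restore pattern; peekBody is a hypothetical helper, and ioutil is used to match the era of this code:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    // peekBody drains resp.Body and replaces it with an in-memory reader so
    // subsequent callers can still read the body.
    func peekBody(resp *http.Response) ([]byte, error) {
        b, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return nil, err
        }
        resp.Body.Close()
        resp.Body = ioutil.NopCloser(bytes.NewReader(b))
        return b, nil
    }

    func main() {
        resp := &http.Response{Body: ioutil.NopCloser(strings.NewReader(`{"status":"InProgress"}`))}
        b, _ := peekBody(resp)
        fmt.Println(string(b))
        b2, _ := ioutil.ReadAll(resp.Body) // body is still readable
        fmt.Println(string(b2))
    }
]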
-func (pt *pollingTrackerBase) updateErrorFromResponse() {
-	var err error
-	if pt.resp.ContentLength != 0 {
-		type respErr struct {
-			ServiceError *ServiceError `json:"error"`
-		}
-		re := respErr{}
-		defer pt.resp.Body.Close()
-		var b []byte
-		if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
-			goto Default
-		}
-		if err = json.Unmarshal(b, &re); err != nil {
-			goto Default
-		}
-		// unmarshalling the error didn't yield anything, try unwrapped error
-		if re.ServiceError == nil {
-			err = json.Unmarshal(b, &re.ServiceError)
-			if err != nil {
-				goto Default
-			}
-		}
-		// the unmarshaller will ensure re.ServiceError is non-nil
-		// even if there was no content unmarshalled so check the code.
-		if re.ServiceError.Code != "" {
-			pt.Err = re.ServiceError
-			return
-		}
-	}
-Default:
-	se := &ServiceError{
-		Code:    pt.pollingStatus(),
-		Message: "The async operation failed.",
-	}
-	if err != nil {
-		se.InnerError = make(map[string]interface{})
-		se.InnerError["unmarshalError"] = err.Error()
-	}
-	// stick the response body into the error object in hopes
-	// it contains something useful to help diagnose the failure.
-	if len(pt.rawBody) > 0 {
-		se.AdditionalInfo = []map[string]interface{}{
-			pt.rawBody,
-		}
-	}
-	pt.Err = se
-}
-
-func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error {
-	if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil {
-		pt.State = pt.rawBody["status"].(string)
-	} else {
-		if pt.resp.StatusCode == http.StatusAccepted {
-			pt.State = operationInProgress
-		} else if provStateApl {
-			if ps := pt.getProvisioningState(); ps != nil {
-				pt.State = *ps
-			} else {
-				pt.State = operationSucceeded
-			}
-		} else {
-			return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code")
-		}
-	}
-	// if the operation has failed update the error state
-	if pt.hasFailed() {
-		pt.updateErrorFromResponse()
-	}
-	return nil
-}
-
-func (pt pollingTrackerBase) pollingError() error {
-	if pt.Err == nil {
-		return nil
-	}
-	return pt.Err
-}
-
-func (pt pollingTrackerBase) pollingMethod() PollingMethodType {
-	return pt.Pm
-}
-
-func (pt pollingTrackerBase) pollingStatus() string {
-	return pt.State
-}
-
-func (pt pollingTrackerBase) pollingURL() string {
-	return pt.URI
-}
-
-func (pt pollingTrackerBase) finalGetURL() string {
-	return pt.FinalGetURI
-}
-
-func (pt pollingTrackerBase) hasTerminated() bool {
-	return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded)
-}
-
-func (pt pollingTrackerBase) hasFailed() bool {
-	return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed)
-}
-
-func (pt pollingTrackerBase) hasSucceeded() bool {
-	return strings.EqualFold(pt.State, operationSucceeded)
-}
-
-func (pt pollingTrackerBase) latestResponse() *http.Response {
-	return pt.resp
-}
-
-// error checking common to all trackers
-func (pt pollingTrackerBase) baseCheckForErrors() error {
-	// for Azure-AsyncOperations the response body cannot be nil or empty
-	if pt.Pm == PollingAsyncOperation {
-		if pt.resp.Body == nil || pt.resp.ContentLength == 0 {
-			return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil")
-		}
-		if pt.rawBody["status"] == nil {
-			return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body")
-		}
-	}
-	return nil
-}
-
-// default initialization of polling URL/method. each verb tracker will update this as required.
-func (pt *pollingTrackerBase) initPollingMethod() error {
-	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
-		return err
-	} else if ao != "" {
-		pt.URI = ao
-		pt.Pm = PollingAsyncOperation
-		return nil
-	}
-	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-		return err
-	} else if lh != "" {
-		pt.URI = lh
-		pt.Pm = PollingLocation
-		return nil
-	}
-	// it's ok if we didn't find a polling header, this will be handled elsewhere
-	return nil
-}
-
-// DELETE
-
-type pollingTrackerDelete struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerDelete) updatePollingMethod() error {
-	// for 201 the Location header is required
-	if pt.resp.StatusCode == http.StatusCreated {
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-			return err
-		} else if lh == "" {
-			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
-		} else {
-			pt.URI = lh
-		}
-		pt.Pm = PollingLocation
-		pt.FinalGetURI = pt.URI
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		// if the Location header is invalid and we already have a polling URL
-		// then we don't care if the Location header URL is malformed.
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
-			return err
-		} else if lh != "" {
-			if ao == "" {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-			// when both headers are returned we use the value in the Location header for the final GET
-			pt.FinalGetURI = lh
-		}
-		// make sure a polling URL was found
-		if pt.URI == "" {
-			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerDelete) checkForErrors() error {
-	return pt.baseCheckForErrors()
-}
-
-func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
-}
-
-// PATCH
-
-type pollingTrackerPatch struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerPatch) updatePollingMethod() error {
-	// by default we can use the original URL for polling and final GET
-	if pt.URI == "" {
-		pt.URI = pt.resp.Request.URL.String()
-	}
-	if pt.FinalGetURI == "" {
-		pt.FinalGetURI = pt.resp.Request.URL.String()
-	}
-	if pt.Pm == PollingUnknown {
-		pt.Pm = PollingRequestURI
-	}
-	// for 201 it's permissible for no headers to be returned
-	if pt.resp.StatusCode == http.StatusCreated {
-		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	// note the absence of the "final GET" mechanism for PATCH
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		if ao == "" {
-			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-				return err
-			} else if lh == "" {
-				return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-			} else {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerPatch) checkForErrors() error {
-	return pt.baseCheckForErrors()
-}
-
-func (pt pollingTrackerPatch) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
-}
-
-// POST
-
-type pollingTrackerPost struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerPost) updatePollingMethod() error {
-	// 201 requires Location header
-	if pt.resp.StatusCode == http.StatusCreated {
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
-			return err
-		} else if lh == "" {
-			return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response")
-		} else {
-			pt.URI = lh
-			pt.FinalGetURI = lh
-			pt.Pm = PollingLocation
-		}
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		// if the Location header is invalid and we already have a polling URL
-		// then we don't care if the Location header URL is malformed.
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
-			return err
-		} else if lh != "" {
-			if ao == "" {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-			// when both headers are returned we use the value in the Location header for the final GET
-			pt.FinalGetURI = lh
-		}
-		// make sure a polling URL was found
-		if pt.URI == "" {
-			return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerPost) checkForErrors() error {
-	return pt.baseCheckForErrors()
-}
-
-func (pt pollingTrackerPost) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
-}
-
-// PUT
-
-type pollingTrackerPut struct {
-	pollingTrackerBase
-}
-
-func (pt *pollingTrackerPut) updatePollingMethod() error {
-	// by default we can use the original URL for polling and final GET
-	if pt.URI == "" {
-		pt.URI = pt.resp.Request.URL.String()
-	}
-	if pt.FinalGetURI == "" {
-		pt.FinalGetURI = pt.resp.Request.URL.String()
-	}
-	if pt.Pm == PollingUnknown {
-		pt.Pm = PollingRequestURI
-	}
-	// for 201 it's permissible for no headers to be returned
-	if pt.resp.StatusCode == http.StatusCreated {
-		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-	}
-	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
-	if pt.resp.StatusCode == http.StatusAccepted {
-		ao, err := getURLFromAsyncOpHeader(pt.resp)
-		if err != nil {
-			return err
-		} else if ao != "" {
-			pt.URI = ao
-			pt.Pm = PollingAsyncOperation
-		}
-		// if the Location header is invalid and we already have a polling URL
-		// then we don't care if the Location header URL is malformed.
-		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
-			return err
-		} else if lh != "" {
-			if ao == "" {
-				pt.URI = lh
-				pt.Pm = PollingLocation
-			}
-		}
-		// make sure a polling URL was found
-		if pt.URI == "" {
-			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
-		}
-	}
-	return nil
-}
-
-func (pt pollingTrackerPut) checkForErrors() error {
-	err := pt.baseCheckForErrors()
-	if err != nil {
-		return err
-	}
-	// if there are no LRO headers then the body cannot be empty
-	ao, err := getURLFromAsyncOpHeader(pt.resp)
-	if err != nil {
-		return err
-	}
-	lh, err := getURLFromLocationHeader(pt.resp)
-	if err != nil {
-		return err
-	}
-	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
-		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
-	}
-	return nil
-}
-
-func (pt pollingTrackerPut) provisioningStateApplicable() bool {
-	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
-}
-
-// creates a polling tracker based on the verb of the original request
-func createPollingTracker(resp *http.Response) (pollingTracker, error) {
-	var pt pollingTracker
-	switch strings.ToUpper(resp.Request.Method) {
-	case http.MethodDelete:
-		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPatch:
-		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPost:
-		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	case http.MethodPut:
-		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
-	default:
-		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
-	}
-	if err := pt.initializeState(); err != nil {
-		return pt, err
-	}
-	// this initializes the polling header values; we do this during creation in case the
-	// initial response sends us invalid values. this way the API call will return a non-nil
-	// error (not doing this means the error shows up in Future.Done)
-	return pt, pt.updatePollingMethod()
-}
-
-// gets the polling URL from the Azure-AsyncOperation header.
-// ensures the URL is well-formed and absolute.
-func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
-	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
-	if s == "" {
-		return "", nil
-	}
-	if !isValidURL(s) {
-		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
-	}
-	return s, nil
-}
-
-// gets the polling URL from the Location header.
-// ensures the URL is well-formed and absolute.
-func getURLFromLocationHeader(resp *http.Response) (string, error) {
-	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
-	if s == "" {
-		return "", nil
-	}
-	if !isValidURL(s) {
-		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
-	}
-	return s, nil
-}
-
-// verify that the URL is valid and absolute
-func isValidURL(s string) bool {
-	u, err := url.Parse(s)
-	return err == nil && u.IsAbs()
-}
-
-// PollingMethodType defines a type used for enumerating polling mechanisms.
-type PollingMethodType string
-
-const (
-	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
-	PollingAsyncOperation PollingMethodType = "AsyncOperation"
-
-	// PollingLocation indicates the polling method uses the Location header.
- PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3a0a439ff..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package azure provides Azure-specific implementations used with AutoRest. -// See the included examples for more detail. -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. -// It adhears to the OData v4 specification for error responses. 
-type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` -} - -func (se ServiceError) Error() string { - result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) - - if se.Target != nil { - result += fmt.Sprintf(" Target=%q", *se.Target) - } - - if se.Details != nil { - d, err := json.Marshal(se.Details) - if err != nil { - result += fmt.Sprintf(" Details=%v", se.Details) - } - result += fmt.Sprintf(" Details=%v", string(d)) - } - - if se.InnerError != nil { - d, err := json.Marshal(se.InnerError) - if err != nil { - result += fmt.Sprintf(" InnerError=%v", se.InnerError) - } - result += fmt.Sprintf(" InnerError=%v", string(d)) - } - - if se.AdditionalInfo != nil { - d, err := json.Marshal(se.AdditionalInfo) - if err != nil { - result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) - } - result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) - } - - return result -} - -// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. -func (se *ServiceError) UnmarshalJSON(b []byte) error { - // per the OData v4 spec the details field must be an array of JSON objects. - // unfortunately not all services adhear to the spec and just return a single - // object instead of an array with one object. so we have to perform some - // shenanigans to accommodate both cases. - // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 - - type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - se1 := serviceError1{} - err := json.Unmarshal(b, &se1) - if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) - return nil - } - - se2 := serviceError2{} - err = json.Unmarshal(b, &se2) - if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) - se.Details = append(se.Details, se2.Details) - return nil - } - return err -} - -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { - se.Code = code - se.Message = message - se.Target = target - se.Details = details - se.InnerError = inner - se.AdditionalInfo = additional -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. 
Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// Resource contains details about an Azure resource. -type Resource struct { - SubscriptionID string - ResourceGroup string - Provider string - ResourceType string - ResourceName string -} - -// ParseResourceID parses a resource ID into a ResourceDetails struct. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. -func ParseResourceID(resourceID string) (Resource, error) { - - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` - resourceIDPattern := regexp.MustCompile(resourceIDPatternText) - match := resourceIDPattern.FindStringSubmatch(resourceID) - - if len(match) == 0 { - return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) - } - - v := strings.Split(match[5], "/") - resourceName := v[len(v)-1] - - result := Resource{ - SubscriptionID: match[1], - ResourceGroup: match[2], - Provider: match[3], - ResourceType: match[4], - ResourceName: resourceName, - } - - return result, nil -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. -func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. 
-func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. - // This will leave the Body available to the caller. - b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { - return err - } - } - if e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body as raw JSON in hopes of getting something. - rawBody := map[string]interface{}{} - if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { - return err - } - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 6c20b8179..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,244 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
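The WithErrorUnlessStatusCode responder deleted above is normally composed into an autorest.Respond chain. A usage sketch (the respond wrapper is a hypothetical name; the decorators themselves appear in the deleted sources):

    package example

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
        "github.com/Azure/go-autorest/autorest/azure"
    )

    // respond converts any status outside the allowed set into an
    // *azure.RequestError carrying the parsed ServiceError, then closes the body.
    func respond(resp *http.Response) error {
        err := autorest.Respond(resp,
            azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
            autorest.ByClosing())
        if err != nil {
            // The decorator returns the error as *azure.RequestError.
            if reqErr, ok := err.(*azure.RequestError); ok && reqErr.ServiceError != nil {
                fmt.Println("service error code:", reqErr.ServiceError.Code)
            }
        }
        return err
    }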
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -const ( - // EnvironmentFilepathName captures the name of the environment variable containing the path to the file - // to be used while populating the Azure Environment. - EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - - // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. - NotAvailable = "N/A" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// ResourceIdentifier contains a set of Azure resource IDs. -type ResourceIdentifier struct { - Graph string `json:"graph"` - KeyVault string `json:"keyVault"` - Datalake string `json:"datalake"` - Batch string `json:"batch"` - OperationalInsights string `json:"operationalInsights"` - Storage string `json:"storage"` -} - -// Environment represents a set of endpoints for each of Azure's Clouds. -type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - TokenAudience string `json:"tokenAudience"` - ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - StorageEndpointSuffix: 
"core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - CosmosDBDNSSuffix: "documents.azure.com", - TokenAudience: "https://management.azure.com/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.azure.net", - Datalake: "https://datalake.azure.net/", - Batch: "https://batch.core.windows.net/", - OperationalInsights: "https://api.loganalytics.io", - Storage: "https://storage.azure.com/", - }, - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.us", - CosmosDBDNSSuffix: "documents.azure.us", - TokenAudience: "https://management.usgovcloudapi.net/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.usgovcloudapi.net", - Datalake: NotAvailable, - Batch: "https://batch.core.usgovcloudapi.net/", - OperationalInsights: "https://api.loganalytics.us", - Storage: "https://storage.azure.com/", - }, - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - CosmosDBDNSSuffix: "documents.azure.cn", - 
TokenAudience: "https://management.chinacloudapi.cn/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.chinacloudapi.cn/", - KeyVault: "https://vault.azure.cn", - Datalake: NotAvailable, - Batch: "https://batch.chinacloudapi.cn/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: NotAvailable, - CosmosDBDNSSuffix: "documents.microsoftazure.de", - TokenAudience: "https://management.microsoftazure.de/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.cloudapi.de/", - KeyVault: "https://vault.microsoftazure.de", - Datalake: NotAvailable, - Batch: "https://batch.cloudapi.de/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. -// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. 
-func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e95c..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... - EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... 
- EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. -func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - 
environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 86ce9f2b5..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
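A sketch of the call pattern generated SDK clients use with this decorator (the send wrapper is a hypothetical name): the decorator wraps the client's Sender so that a 409 carrying "MissingSubscriptionRegistration" triggers a one-time resource provider registration before the request is retried.

    package example

    import (
        "net/http"

        "github.com/Azure/go-autorest/autorest"
        "github.com/Azure/go-autorest/autorest/azure"
    )

    // send dispatches req through the client with registration-aware retries.
    func send(client autorest.Client, req *http.Request) (*http.Response, error) {
        return autorest.SendWithSender(client, req,
            azure.DoRetryWithRegistration(client))
    }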
-// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, this sections are easier to mantain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - registrationStartTime := time.Now() - 
for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index 1c6a0617a..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,300 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. 
- DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. -// If there was no response (i.e. the underlying http.Response is nil) the return value is false. -func (r Response) IsHTTPStatus(statusCode int) bool { - if r.Response == nil { - return false - } - return r.Response.StatusCode == statusCode -} - -// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. -// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided -// the return value is false. -func (r Response) HasHTTPStatus(statusCodes ...int) bool { - return ResponseHasStatusCode(r.Response, statusCodes...) -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. 
It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - return newClient(ua, tls.RenegotiateNever) -} - -// ClientOptions contains various Client configuration options. -type ClientOptions struct { - // UserAgent is an optional user-agent string to append to the default user agent. - UserAgent string - - // Renegotiation is an optional setting to control client-side TLS renegotiation. - Renegotiation tls.RenegotiationSupport -} - -// NewClientWithOptions returns an instance of a Client with the specified values. -func NewClientWithOptions(options ClientOptions) Client { - return newClient(options.UserAgent, options.Renegotiation) -} - -func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender(renegotiation) - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. 
-func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { - if c.Sender == nil { - return sender(renengotiation) - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c45710656..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. 
-*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
-func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod deleted file mode 100644 index 13a1e9803..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/Azure/go-autorest/autorest/date - -go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad04..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). 
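The date.Time fallback above (azureUtcFormat vs. RFC3339, chosen by tzOffsetRegex) is easiest to see in a round trip; a minimal sketch with illustrative timestamps, one with and one without the trailing 'Z':

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        var t date.Time
        // Both forms unmarshal to the same instant; the Z-less form takes the
        // azureUtcFormat branch, the suffixed form the RFC3339 branch.
        for _, raw := range []string{`"2017-06-01T12:00:00Z"`, `"2017-06-01T12:00:00"`} {
            if err := json.Unmarshal([]byte(raw), &t); err != nil {
                log.Fatal(err)
            }
            fmt.Println(t.ToTime().UTC()) // 2017-06-01 12:00:00 +0000 UTC
        }
    }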
-func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). 
-func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'. -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
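// Illustrative sketch, not part of the vendored file: UnixTime round-trips
// through JSON as a floating point number of seconds since the epoch, using
// the MarshalJSON/UnmarshalJSON methods below. Assuming the vendored import path:
//
//	package main
//
//	import (
//		"encoding/json"
//		"fmt"
//
//		"github.com/Azure/go-autorest/autorest/date"
//	)
//
//	func main() {
//		t := date.NewUnixTimeFromSeconds(1488326400) // 2017-03-01T00:00:00Z
//		b, _ := json.Marshal(t)                      // a JSON number, e.g. 1.4883264e+09
//		var back date.UnixTime
//		_ = json.Unmarshal(b, &back)
//		fmt.Println(back.Duration()) // time elapsed since date.UnixEpoch()
//	}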
-func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the UnixTime as text via the underlying time.Time (RFC3339 format). -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime from text via the underlying time.Time (RFC3339 format). -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0eb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime parses the time string t using the specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f3332..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses an error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced by the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // ServiceError is the response body of the failed API call, in bytes. - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted string containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)).
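// Illustrative sketch, not part of the vendored file: how a caller might wrap a
// failed response in a DetailedError. The package name, method name, and
// resource name below are hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func main() {
//		resp := &http.Response{StatusCode: http.StatusNotFound, Status: "404 Not Found"}
//		err := autorest.NewErrorWithResponse("mypkg", "Get", resp, "resource %q not found", "foo")
//		fmt.Println(err.Error())    // mypkg#Get: resource "foo" not found: StatusCode=404
//		fmt.Println(err.StatusCode) // 404
//	}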
-func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod deleted file mode 100644 index ab2ae66ac..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/Azure/go-autorest/autorest - -go 1.12 - -require ( - github.com/Azure/go-autorest/autorest/adal v0.5.0 - github.com/Azure/go-autorest/autorest/mocks v0.2.0 - github.com/Azure/go-autorest/logger v0.1.0 - github.com/Azure/go-autorest/tracing v0.5.0 - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 -) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum deleted file mode 100644 index 729b99cd0..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go.sum +++ /dev/null @@ -1,18 +0,0 @@ -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 6e8ed64eb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,550 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerAuxAuthorization = "x-ms-authorization-auxiliary" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// used as a key type in context.WithValue() -type ctxPrepareDecorators struct{} - -// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. -// If no PrepareDecorators are provided the context is unchanged. -func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { - if len(prepareDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) -} - -// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. -func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { - inCtx := ctx.Value(ctxPrepareDecorators{}) - if pd, ok := inCtx.([]PrepareDecorator); ok { - return pd - } - return defaultPrepareDecorators -} - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their effect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return).
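// Illustrative sketch, not part of the vendored file: a custom pre-decorator
// that mutates the request before handing it to the next Preparer in the chain.
// The header name and value are hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func withRequestID(id string) autorest.PrepareDecorator {
//		return func(p autorest.Preparer) autorest.Preparer {
//			return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
//				r.Header.Set("X-Request-Id", id) // change the request first...
//				return p.Prepare(r)              // ...then pass it along (pre-decorator)
//			})
//		}
//	}
//
//	func main() {
//		req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
//		req, err := autorest.CreatePreparer(withRequestID("42")).Prepare(req)
//		fmt.Println(req.Header.Get("X-Request-Id"), err)
//	}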
-func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
-func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. -func AsMerge() PrepareDecorator { return WithMethod("MERGE") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithBytes returns a PrepareDecorator that takes a list of bytes -// and passes them directly to the request body. -func WithBytes(input *[]byte) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if input == nil { - return r, fmt.Errorf("Input Bytes was nil") - } - - r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecorator that "URL encodes" the passed url.Values (e.g., -// bar=baz&foo=quux) into the http.Request body.
-func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters as -// multipart/form-data into the http.Request body. -func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends the file in the request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header.
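// Illustrative sketch, not part of the vendored file: WithFormData above
// encodes url.Values into the body and sets the Content-Type header. The
// endpoint and field names below are hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"io/ioutil"
//		"net/http"
//		"net/url"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func main() {
//		v := url.Values{}
//		v.Set("grant_type", "client_credentials")
//		req, _ := http.NewRequest(http.MethodPost, "https://example.com/token", nil)
//		req, _ = autorest.Prepare(req, autorest.WithFormData(v))
//		b, _ := ioutil.ReadAll(req.Body)
//		fmt.Println(string(b), req.Header.Get("Content-Type"))
//		// grant_type=client_credentials application/x-www-form-urlencoded
//	}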
-func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the -// request and sets the Content-Length header. -func WithXML(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := xml.Marshal(v) - if err == nil { - // we have to tack on an XML header - withHeader := xml.Header + string(b) - bytesWithHeader := []byte(withHeader) - - r.ContentLength = int64(len(bytesWithHeader)) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
-func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := MapToValues(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - for i := range value { - d, err := url.QueryUnescape(value[i]) - if err != nil { - return r, err - } - value[i] = d - } - v[key] = value - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index 349e1963a..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,269 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder.
Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-use: it depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their effect upon the -// response depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators, which it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set.
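// Illustrative sketch, not part of the vendored file: a typical Respond
// pipeline that fails on unexpected status codes, decodes JSON, and always
// closes the body (those decorators are defined below). The URL is hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func main() {
//		resp, err := http.Get("https://example.com/api")
//		if err != nil {
//			return
//		}
//		var out map[string]interface{}
//		err = autorest.Respond(resp,
//			autorest.WithErrorUnlessStatusCode(http.StatusOK),
//			autorest.ByUnmarshallingJSON(&out),
//			autorest.ByClosing())
//		fmt.Println(out, err)
//	}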
-func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingBytes(v *[]byte) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - *v = bytes - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...)
{ - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbed7..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request.
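// Illustrative sketch, not part of the vendored file: a hand-rolled retry loop;
// Prepare restores the request body before each send so the request can be
// replayed. The URL, attempt count, and backoff below are arbitrary:
//
//	package main
//
//	import (
//		"net/http"
//		"time"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func main() {
//		req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
//		rr := autorest.NewRetriableRequest(req)
//		for attempt := 0; attempt < 3; attempt++ {
//			if err := rr.Prepare(); err != nil { // rewinds/recreates the body
//				return
//			}
//			resp, err := http.DefaultClient.Do(rr.Request())
//			if err == nil && resp.StatusCode < 500 {
//				resp.Body.Close()
//				return
//			}
//			if resp != nil {
//				resp.Body.Close()
//			}
//			time.Sleep(time.Second)
//		}
//	}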
-func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 7143cc61b..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the request body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index ae15c6bf9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request.
-type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the request body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 5e595d7b1..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,407 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "math" - "net/http" - "net/http/cookiejar" - "strconv" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -// used as a key type in context.WithValue() -type ctxSendDecorators struct{} - -// WithSendDecorators adds the specified SendDecorators to the provided context. -// If no SendDecorators are provided the context is unchanged. -func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { - if len(sendDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) -} - -// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. -func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { - inCtx := ctx.Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - return sd - } - return defaultSendDecorators -} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender.
Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(tls.RenegotiateNever), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to -// the Sender. Decorators are applied in the order received, but their effect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply to the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply to the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -func sender(renegotiation tls.RenegotiationSupport) Sender { - // Use behaviour compatible with DefaultTransport, but require TLS minimum version. - defaultTransport := http.DefaultTransport.(*http.Transport) - transport := &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: renegotiation, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j, Transport: roundTripper} -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked.
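// Illustrative sketch, not part of the vendored file: sending through a shared
// http.Client with the retry decorator defined later in this file. The URL and
// retry parameters below are arbitrary:
//
//	package main
//
//	import (
//		"net/http"
//		"time"
//
//		"github.com/Azure/go-autorest/autorest"
//	)
//
//	func main() {
//		req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
//		resp, err := autorest.SendWithSender(&http.Client{}, req,
//			autorest.DoRetryForStatusCodes(3, time.Second,
//				http.StatusTooManyRequests, http.StatusServiceUnavailable))
//		if err == nil {
//			resp.Body.Close()
//		}
//	}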
-func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) 
{ - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. -// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) - }) - } -} - -// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the -// specified number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater -// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. -func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) - }) - } -} - -func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return - } - resp, err = s.Do(rr.Request()) - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // when count429 == false don't count a 429 against the number - // of attempts so that we continue to retry until it succeeds - if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { - attempt++ - } - } - return resp, err -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. -// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. -// The function returns true after successfully waiting for the specified duration. If there is -// no Retry-After header or the wait is cancelled the return value is false. -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - var dur time.Duration - ra := resp.Header.Get("Retry-After") - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - dur = time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - dur = t.Sub(time.Now()) - } - if dur > 0 { - select { - case <-time.After(dur): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. -func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count.
-func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - return DelayForBackoffWithCap(backoff, 0, attempt, cancel) -} - -// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set -// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. -// The delay may be canceled by closing the passed channel. If terminated early, returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { - d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second - if cap > 0 && d > cap { - d = cap - } - select { - case <-time.After(d): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 08cf11c11..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,228 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as XML - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
-// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. It expects the -// parameter to be a slice or an array whose element type has string as its -// underlying type. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, v.Index(i).String()) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. -func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if !ok { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result.
-func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 7a71089c9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v13.0.2" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). -func Version() string { - return number -} diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod deleted file mode 100644 index f22ed56bc..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/Azure/go-autorest/logger - -go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index da09f394c..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,328 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. -func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. - URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. 
- Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. -func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. 
-type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod deleted file mode 100644 index 25c34c108..000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/Azure/go-autorest/tracing - -go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index 0e7a6e962..000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,67 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" -) - -// Tracer represents an HTTP tracing facility. -type Tracer interface { - NewTransport(base *http.Transport) http.RoundTripper - StartSpan(ctx context.Context, name string) context.Context - EndSpan(ctx context.Context, httpStatusCode int, err error) -} - -var ( - tracer Tracer -) - -// Register will register the provided Tracer. Pass nil to unregister a Tracer. -func Register(t Tracer) { - tracer = t -} - -// IsEnabled returns true if a Tracer has been registered. -func IsEnabled() bool { - return tracer != nil -} - -// NewTransport creates a new instrumenting http.RoundTripper for the -// registered Tracer. If no Tracer has been registered it returns nil. -func NewTransport(base *http.Transport) http.RoundTripper { - if tracer != nil { - return tracer.NewTransport(base) - } - return nil -} - -// StartSpan starts a trace span with the specified name, associating it with the -// provided context. Has no effect if a Tracer has not been registered. -func StartSpan(ctx context.Context, name string) context.Context { - if tracer != nil { - return tracer.StartSpan(ctx, name) - } - return ctx -} - -// EndSpan ends a previously started span stored in the context. -// Has no effect if a Tracer has not been registered. 
-func EndSpan(ctx context.Context, httpStatusCode int, err error) { - if tracer != nil { - tracer.EndSpan(ctx, httpStatusCode, err) - } -} diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46ec..000000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 55ebc98fc..000000000 --- a/vendor/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. 
- -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff.Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again.
-type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/context.go deleted file mode 100644 index 7706faa2b..000000000 --- a/vendor/github.com/cenkalti/backoff/context.go +++ /dev/null @@ -1,63 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func ensureContext(b BackOff) BackOffContext { - if cb, ok := b.(BackOffContext); ok { - return cb - } - return WithContext(b, context.Background()) -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { - return Stop - } - return next -} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index a031a6597..000000000 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,153 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. 
-*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. 
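- // For example: with currentInterval = 2s and randomizationFactor = 0.5, - // delta is 1s, so the result is drawn roughly uniformly from [1s, 3s].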
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index e936a506f..000000000 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import "time" - -// An Operation is executed by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy signals that retrying should stop, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return an error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls the notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - var t *time.Timer - - cb := ensureContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err - } - - if next = cb.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } - - select { - case <-cb.Context().Done(): - return err - case <-t.C: - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index e41084b0e..000000000 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import ( - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOffContext - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when the Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker.
After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.b.Context().Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/tries.go deleted file mode 100644 index cfeefd9b7..000000000 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/vendor/github.com/cespare/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md deleted file mode 100644 index 0982fd25e..000000000 --- a/vendor/github.com/cespare/xxhash/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. 
This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -The API is very small, taking its cue from the other hashing packages in the -standard library: - - $ go doc github.com/cespare/xxhash - package xxhash // import "github.com/cespare/xxhash" - - Package xxhash implements the 64-bit variant of xxHash (XXH64) as described - at http://cyan4973.github.io/xxHash/. - - func New() hash.Hash64 - func Sum64(b []byte) uint64 - func Sum64String(s string) uint64 - -The package provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/go.mod b/vendor/github.com/cespare/xxhash/go.mod deleted file mode 100644 index 10605a6a5..000000000 --- a/vendor/github.com/cespare/xxhash/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/cespare/xxhash - -require ( - github.com/OneOfOne/xxhash v1.2.2 - github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 -) diff --git a/vendor/github.com/cespare/xxhash/go.sum b/vendor/github.com/cespare/xxhash/go.sum deleted file mode 100644 index f6b554261..000000000 --- a/vendor/github.com/cespare/xxhash/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5ebc..000000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
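- -// rolN rotates x left by N bits. On Go 1.9 and later, the equivalent -// definitions in rotate19.go use math/bits.RotateLeft64 instead (see the -// build tags in both files).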
- -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612bab..000000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd28f..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. 
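- // At this point 0 < x.n < 32 and x.n+len(b) >= 32, so topping mem up to a - // full 32-byte block always succeeds; the block is then consumed as four - // 8-byte rounds.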
- copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. - copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d61765268..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f2011f..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. 
- SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. 
- ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f89..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab7e..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8bb..000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. 
- var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md deleted file mode 100644 index 1cade6cef..000000000 --- a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go deleted file mode 100644 index af62279a6..000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go +++ /dev/null @@ -1,20 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday" -) - -// Render converts a markdown document into a roff formatted document. -func Render(doc []byte) []byte { - renderer := RoffRenderer(0) - extensions := 0 - extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS - extensions |= blackfriday.EXTENSION_TABLES - extensions |= blackfriday.EXTENSION_FENCED_CODE - extensions |= blackfriday.EXTENSION_AUTOLINK - extensions |= blackfriday.EXTENSION_SPACE_HEADERS - extensions |= blackfriday.EXTENSION_FOOTNOTES - extensions |= blackfriday.EXTENSION_TITLEBLOCK - - return blackfriday.Markdown(doc, renderer, extensions) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go deleted file mode 100644 index 8c29ec687..000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go +++ /dev/null @@ -1,285 +0,0 @@ -package md2man - -import ( - "bytes" - "fmt" - "html" - "strings" - - "github.com/russross/blackfriday" -) - -type roffRenderer struct { - ListCounters []int -} - -// RoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func RoffRenderer(flags int) blackfriday.Renderer { - return &roffRenderer{} -} - -func (r *roffRenderer) GetFlags() int { - return 0 -} - -func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { - out.WriteString(".TH ") - - splitText := bytes.Split(text, []byte("\n")) - for i, line := range splitText { - line = bytes.TrimPrefix(line, []byte("% ")) - if i == 0 { - line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) - line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) - } - line = append([]byte("\""), line...) - line = append(line, []byte("\" ")...) 
- out.Write(line) - } - out.WriteString("\n") - - // disable hyphenation - out.WriteString(".nh\n") - // disable justification (adjust text to left margin only) - out.WriteString(".ad l\n") -} - -func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { - out.WriteString("\n.PP\n.RS\n\n.nf\n") - escapeSpecialChars(out, text) - out.WriteString("\n.fi\n.RE\n") -} - -func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { - out.WriteString("\n.PP\n.RS\n") - out.Write(text) - out.WriteString("\n.RE\n") -} - -func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint - out.Write(text) -} - -func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { - marker := out.Len() - - switch { - case marker == 0: - // This is the doc header - out.WriteString(".TH ") - case level == 1: - out.WriteString("\n\n.SH ") - case level == 2: - out.WriteString("\n.SH ") - default: - out.WriteString("\n.SS ") - } - - if !text() { - out.Truncate(marker) - return - } -} - -func (r *roffRenderer) HRule(out *bytes.Buffer) { - out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") -} - -func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { - marker := out.Len() - r.ListCounters = append(r.ListCounters, 1) - out.WriteString("\n.RS\n") - if !text() { - out.Truncate(marker) - return - } - r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] - out.WriteString("\n.RE\n") -} - -func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { - if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) - r.ListCounters[len(r.ListCounters)-1]++ - } else { - out.WriteString(".IP \\(bu 2\n") - } - out.Write(text) - out.WriteString("\n") -} - -func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { - marker := out.Len() - out.WriteString("\n.PP\n") - if !text() { - out.Truncate(marker) - return - } - if marker != 0 { - out.WriteString("\n") - } -} - -func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - out.WriteString("\n.TS\nallbox;\n") - - maxDelims := 0 - lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") - for _, w := range lines { - curDelims := strings.Count(w, "\t") - if curDelims > maxDelims { - maxDelims = curDelims - } - } - out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) - out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) - out.Write(header) - if len(header) > 0 { - out.Write([]byte("\n")) - } - - out.Write(body) - out.WriteString("\n.TE\n") -} - -func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { - if out.Len() > 0 { - out.WriteString("\n") - } - out.Write(text) -} - -func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString("\t") - } - if len(text) == 0 { - text = []byte{' '} - } - out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) -} - -func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString("\t") - } - if len(text) > 30 { - text = append([]byte("T{\n"), text...) - text = append(text, []byte("\nT}")...) 
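- // T{ ... T} is tbl's text-block syntax; wrapping cells longer than 30 - // bytes lets tbl break them across multiple physical lines.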
- } - if len(text) == 0 { - text = []byte{' '} - } - out.Write(text) -} - -func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { - -} - -func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { - -} - -func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { - out.WriteString("\n\\[la]") - out.Write(link) - out.WriteString("\\[ra]") -} - -func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB\\fC") - escapeSpecialChars(out, text) - out.WriteString("\\fR") -} - -func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fI") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { -} - -func (r *roffRenderer) LineBreak(out *bytes.Buffer) { - out.WriteString("\n.br\n") -} - -func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - out.Write(content) - r.AutoLink(out, link, 0) -} - -func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint - out.Write(tag) -} - -func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\s+2") - out.Write(text) - out.WriteString("\\s-2") -} - -func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { -} - -func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { - -} - -func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { - out.WriteString(html.UnescapeString(string(entity))) -} - -func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { - escapeSpecialChars(out, text) -} - -func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { -} - -func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(out *bytes.Buffer, text []byte) { - for i := 0; i < len(text); i++ { - // escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out.WriteString("\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - out.Write(text[org:i]) - } - - // escape a character - if i >= len(text) { - break - } - out.WriteByte('\\') - out.WriteByte(text[i]) - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed650e..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index 1027f56cd..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... 
- -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c2f..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793c..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type-cast the claims property. - -The old example for parsing a token looked like this... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
- -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument. -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header. - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
- -```go - func keyLookupFunc(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index d358d881b..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# jwt-go - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm now working on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types to match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
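- -As a quick orientation (an editor's sketch rather than part of the original README, using only the API documented in this repository; the key, claim names, and variable names are illustrative), creating, signing, and then parsing an HS256 token looks roughly like this: - -```go - key := []byte("secret") - - // Create and sign a token. - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "bob"}) - signed, err := token.SignedString(key) - if err != nil { - panic(err) - } - - // Parse and validate it, checking the alg as the security notice above advises. - parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { - if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"]) - } - return key, nil - }) - if err == nil && parsed.Valid { - fmt.Println(parsed.Claims.(jwt.MapClaims)["user"]) - } -```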
- -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning. - -**BREAKING CHANGES:** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: - -* The author of the token was in possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter.
Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 637029831..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,118 +0,0 @@ -## `jwt-go` Version History - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation (a short sketch follows this list) -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
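- -As a minimal sketch of the `ParseUnverified` addition (hedged: this assumes the 3.2.0 signature `ParseUnverified(tokenString string, claims Claims) (*Token, []string, error)`, and the result must never be trusted, since the signature is not checked): - -```go - claims := jwt.MapClaims{} - token, _, err := new(jwt.Parser).ParseUnverified(tokenString, claims) - if err == nil { - // Useful only for routing decisions, e.g. picking a key by issuer. - fmt.Println(token.Header["alg"], claims["iss"]) - } -```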
- -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. 
The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation.
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f02e..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a3b..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index f97738124..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods -// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { -
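// Anything other than the raw big-endian R || S concatenation (each part exactly m.KeySize bytes) is rejected.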
return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...)
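// out is the R || S pair, 2*keyBytes in total; EncodeSegment applies unpadded base64url.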
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b72..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index 1c93024aa..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index addbe5d40..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,95 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods -// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method.
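// The signature is the raw HMAC of the signing string, base64url-encoded without padding.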
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKeyType -} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c46..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
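// Callers that need a claim to be mandatory can use the Verify* helpers above with req set to true.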
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189d0..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index d6901d9ad..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
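// The unverified token's header (for example a `kid` value) can be used to pick between candidate keys.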
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - if ve, ok := err.(*ValidationError); ok { - return token, ve - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// WARNING: Don't use this method unless you know what you're doing -// -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. 
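// The returned token is unvalidated: Valid remains false and the Signature field is left empty.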
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - return token, parts, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index e4caf1ca4..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods -// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, key must be an *rsa.PublicKey structure.
-func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an *rsa.PrivateKey structure. -func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index 10ee9db8a..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() -
hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index a5ababf95..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password -func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, 
ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index ed1f212b2..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index d637e0867..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. 
Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/elithrar/simple-scrypt/.gitignore b/vendor/github.com/elithrar/simple-scrypt/.gitignore deleted file mode 100644 index 843bb79fc..000000000 --- a/vendor/github.com/elithrar/simple-scrypt/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -*.DS_Store diff --git a/vendor/github.com/elithrar/simple-scrypt/.travis.yml b/vendor/github.com/elithrar/simple-scrypt/.travis.yml deleted file mode 100644 index b495da936..000000000 --- a/vendor/github.com/elithrar/simple-scrypt/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -sudo: false - -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v ./... 
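For reference, the jwt-go pieces removed above (`token.go`, `parser.go`, and the registered signing methods) compose as follows. This is a minimal illustrative sketch, not code from the patch: the HMAC key and claims are invented, and it shows the post-2.0.0 `Keyfunc` signature returning `interface{}`:

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("illustrative-hmac-key") // invented key, for the sketch only

	// Build and sign a token using the vendored API shown above.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"})
	signed, err := token.SignedString(key)
	if err != nil {
		log.Fatal(err)
	}

	// Parse it back. Since 2.0.0 the Keyfunc returns interface{}, not []byte.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// Reject unexpected signing methods before handing back the key.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected alg: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Valid) // true
}
```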
diff --git a/vendor/github.com/elithrar/simple-scrypt/LICENSE b/vendor/github.com/elithrar/simple-scrypt/LICENSE deleted file mode 100644 index e8988e7b4..000000000 --- a/vendor/github.com/elithrar/simple-scrypt/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Matthew Silverlock (matt@eatsleeprepeat.net) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/elithrar/simple-scrypt/README.md b/vendor/github.com/elithrar/simple-scrypt/README.md deleted file mode 100644 index 4a093e186..000000000 --- a/vendor/github.com/elithrar/simple-scrypt/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# simple-scrypt -[![GoDoc](https://godoc.org/github.com/elithrar/simple-scrypt?status.svg)](https://godoc.org/github.com/elithrar/simple-scrypt) [![Build Status](https://travis-ci.org/elithrar/simple-scrypt.svg?branch=master)](https://travis-ci.org/elithrar/simple-scrypt) - -simple-scrypt provides a convenience wrapper around Go's existing -[scrypt](http://golang.org/x/crypto/scrypt) package that makes it easier to -securely derive strong keys ("hash user passwords"). This library allows you to: - -* Generate a scrypt derived key with a cryptographically secure salt and sane - default parameters for N, r and p. -* Upgrade the parameters used to generate keys as hardware improves by storing - them with the derived key (the scrypt spec. doesn't allow for this by - default). -* Provide your own parameters (if you wish to). - -The API closely mirrors Go's [bcrypt](https://golang.org/x/crypto/bcrypt) -library in an effort to make it easy to migrate, and because it's an easy-to-grok -API. - -## Installation - -With a [working Go toolchain](https://golang.org/doc/code.html): - -```sh -go get -u github.com/elithrar/simple-scrypt -``` - -## Example - -simple-scrypt doesn't try to re-invent the wheel or do anything "special". It -wraps the `scrypt.Key` function as thinly as possible, generates a -cryptographically secure salt for you using Go's `crypto/rand` package, and -returns the derived key with the parameters prepended: - -```go -package main - -import ( - "fmt" - "log" - - "github.com/elithrar/simple-scrypt" -) - -func main() { - // e.g.
r.PostFormValue("password") - passwordFromForm := "prew8fid9hick6c" - - // Generates a derived key of the form "N$r$p$salt$dk" where N, r and p are defined as per - // Colin Percival's scrypt paper: http://www.tarsnap.com/scrypt/scrypt.pdf - // scrypt.DefaultParams (N=16384, r=8, p=1) makes it easy to provide these parameters, and - // (should you wish) provide your own values via the scrypt.Params type. - hash, err := scrypt.GenerateFromPassword([]byte(passwordFromForm), scrypt.DefaultParams) - if err != nil { - log.Fatal(err) - } - - // Print the derived key with its parameters prepended. - fmt.Printf("%s\n", hash) - - // Uses the parameters from the existing derived key. Returns an error if they don't match. - err = scrypt.CompareHashAndPassword(hash, []byte(passwordFromForm)) - if err != nil { - log.Fatal(err) - } -} -``` - -## Upgrading Parameters - -Upgrading derived keys from a set of parameters to a "stronger" set of parameters -as hardware improves, or as you scale (and move your auth process to separate -hardware), can be pretty useful. Here's how to do it with simple-scrypt: - -```go -func main() { - // SCENE: We've successfully authenticated a user, compared their submitted - // (cleartext) password against the derived key stored in our database, and - // now want to upgrade the parameters (more rounds, more parallelism) to - // reflect some shiny new hardware we just purchased. As the user is logging - // in, we can retrieve the parameters used to generate their key, and if - // they don't match our "new" parameters, we can re-generate the key while - // we still have the cleartext password in memory - // (e.g. before the HTTP request ends). - current, err := scrypt.Cost(hash) - if err != nil { - log.Fatal(err) - } - - // Now to check them against our own Params struct (e.g. using reflect.DeepEqual) - // and determine whether we want to generate a new key with our "upgraded" parameters. - slower := scrypt.Params{ - N: 32768, - R: 8, - P: 2, - SaltLen: 16, - DKLen: 32, - } - - if !reflect.DeepEqual(current, slower) { - // Re-generate the key with the slower parameters - // here using scrypt.GenerateFromPassword - } -} -``` - -## Automatically Determining Parameters - -Thanks to the work by [tgulacsi](https://github.com/tgulacsi), you can have simple-scrypt -automatically determine the optimal parameters for you (time vs. memory). You should run this once -on program startup, as calibrating parameters can be an expensive operation. - -```go -var params scrypt.Params - -func main() { - var err error - // 500ms, 64MB of RAM per hash. - params, err = scrypt.Calibrate(500*time.Millisecond, 64, scrypt.Params{}) - if err != nil { - log.Fatal(err) - } - - ... -} - -func RegisterUserHandler(w http.ResponseWriter, r *http.Request) { - err := r.ParseForm() - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Make sure you validate: not empty, not too long, etc. - email := r.PostFormValue("email") - pass := r.PostFormValue("password") - - // Use our calibrated parameters - hash, err := scrypt.GenerateFromPassword([]byte(pass), params) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Save to DB, etc. -} -``` - -Be aware that increasing these, whilst making it harder to brute-force the resulting hash, also -increases the risk of a denial-of-service attack against your server. A surge in authentication -attempts (even if legitimate!) could consume all available resources. - -## License - -MIT Licensed. See LICENSE file for details.
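As a counterpart to the `RegisterUserHandler` example above, here is a minimal sketch of the verify side at login; `lookupHashByEmail` is a hypothetical stand-in for your storage layer:

```go
package main

import (
	"errors"
	"log"
	"net/http"

	scrypt "github.com/elithrar/simple-scrypt"
)

// lookupHashByEmail is a hypothetical stand-in for a database lookup.
func lookupHashByEmail(email string) ([]byte, error) {
	return nil, errors.New("not implemented")
}

// LoginHandler verifies a submitted password against the stored derived key.
func LoginHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	email := r.PostFormValue("email")
	pass := r.PostFormValue("password")

	hash, err := lookupHashByEmail(email)
	if err != nil {
		http.Error(w, "invalid credentials", http.StatusUnauthorized)
		return
	}

	// CompareHashAndPassword re-derives the key with the parameters and salt
	// stored in the hash itself, then compares in constant time.
	if err := scrypt.CompareHashAndPassword(hash, []byte(pass)); err != nil {
		http.Error(w, "invalid credentials", http.StatusUnauthorized)
		return
	}

	// Authenticated: create a session, set a cookie, etc.
}

func main() {
	http.HandleFunc("/login", LoginHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```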
diff --git a/vendor/github.com/elithrar/simple-scrypt/scrypt.go b/vendor/github.com/elithrar/simple-scrypt/scrypt.go deleted file mode 100644 index 2742ea02a..000000000 --- a/vendor/github.com/elithrar/simple-scrypt/scrypt.go +++ /dev/null @@ -1,309 +0,0 @@ -// Package scrypt provides a convenience wrapper around Go's existing scrypt package -// that makes it easier to securely derive strong keys from weak -// inputs (i.e. user passwords). -// The package provides password generation, constant-time comparison and -// parameter upgrading for scrypt derived keys. -package scrypt - -import ( - "crypto/rand" - "crypto/subtle" - "encoding/hex" - "errors" - "fmt" - "strconv" - "strings" - "time" - - "golang.org/x/crypto/scrypt" -) - -// Constants -const ( - maxInt = 1<<31 - 1 - minDKLen = 16 // the minimum derived key length in bytes. - minSaltLen = 8 // the minimum allowed salt length in bytes. -) - -// Params describes the input parameters to the scrypt -// key derivation function as per Colin Percival's scrypt -// paper: http://www.tarsnap.com/scrypt/scrypt.pdf -type Params struct { - N int // CPU/memory cost parameter (logN) - R int // block size parameter (octets) - P int // parallelisation parameter (positive int) - SaltLen int // bytes to use as salt (octets) - DKLen int // length of the derived key (octets) -} - -// DefaultParams provides sensible default inputs into the scrypt function -// for interactive use (i.e. web applications). -// These defaults will consume approximately 16MB of memory (128 * r * N). -// The default key length is 256 bits. -var DefaultParams = Params{N: 16384, R: 8, P: 1, SaltLen: 16, DKLen: 32} - -// ErrInvalidHash is returned when failing to parse a provided scrypt -// hash and/or parameters. -var ErrInvalidHash = errors.New("scrypt: the provided hash is not in the correct format") - -// ErrInvalidParams is returned when the cost parameters (N, r, p), salt length -// or derived key length are invalid. -var ErrInvalidParams = errors.New("scrypt: the parameters provided are invalid") - -// ErrMismatchedHashAndPassword is returned when a password (hashed) and -// given hash do not match. -var ErrMismatchedHashAndPassword = errors.New("scrypt: the hashed password does not match the hash of the given password") - -// GenerateRandomBytes returns securely generated random bytes. -// It will return an error if the system's secure random -// number generator fails to function correctly, in which -// case the caller should not continue. -func GenerateRandomBytes(n int) ([]byte, error) { - b := make([]byte, n) - _, err := rand.Read(b) - // err == nil only if len(b) == n - if err != nil { - return nil, err - } - - return b, nil -} - -// GenerateFromPassword returns the derived key of the password using the -// parameters provided. The parameters are prepended to the derived key and -// separated by the "$" character (0x24). -// If the parameters provided are less than the minimum acceptable values, -// an error will be returned. -func GenerateFromPassword(password []byte, params Params) ([]byte, error) { - salt, err := GenerateRandomBytes(params.SaltLen) - if err != nil { - return nil, err - } - - if err := params.Check(); err != nil { - return nil, err - } - - // scrypt.Key returns the raw scrypt derived key. - dk, err := scrypt.Key(password, salt, params.N, params.R, params.P, params.DKLen) - if err != nil { - return nil, err - } - - // Prepend the params and the salt to the derived key, each separated - // by a "$" character.
The salt and the derived key are hex encoded. - return []byte(fmt.Sprintf("%d$%d$%d$%x$%x", params.N, params.R, params.P, salt, dk)), nil -} - -// CompareHashAndPassword compares a derived key with the possible cleartext -// equivalent. The parameters used in the provided derived key are used. -// The comparison performed by this function is constant-time. It returns nil -// on success, and an error if the derived keys do not match. -func CompareHashAndPassword(hash []byte, password []byte) error { - // Decode existing hash, retrieve params and salt. - params, salt, dk, err := decodeHash(hash) - if err != nil { - return err - } - - // scrypt the cleartext password with the same parameters and salt - other, err := scrypt.Key(password, salt, params.N, params.R, params.P, params.DKLen) - if err != nil { - return err - } - - // Constant time comparison - if subtle.ConstantTimeCompare(dk, other) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Check checks that the parameters are valid for input into the -// scrypt key derivation function. -func (p *Params) Check() error { - // Validate N - if p.N > maxInt || p.N <= 1 || p.N%2 != 0 { - return ErrInvalidParams - } - - // Validate r - if p.R < 1 || p.R > maxInt { - return ErrInvalidParams - } - - // Validate p - if p.P < 1 || p.P > maxInt { - return ErrInvalidParams - } - - // Validate that r & p don't exceed 2^30 and that N, r, p values don't - // exceed the limits defined by the scrypt algorithm. - if uint64(p.R)*uint64(p.P) >= 1<<30 || p.R > maxInt/128/p.P || p.R > maxInt/256 || p.N > maxInt/128/p.R { - return ErrInvalidParams - } - - // Validate the salt length - if p.SaltLen < minSaltLen || p.SaltLen > maxInt { - return ErrInvalidParams - } - - // Validate the derived key length - if p.DKLen < minDKLen || p.DKLen > maxInt { - return ErrInvalidParams - } - - return nil -} - -// decodeHash extracts the parameters, salt and derived key from the -// provided hash. It returns an error if the hash format is invalid and/or -// the parameters are invalid. -func decodeHash(hash []byte) (Params, []byte, []byte, error) { - vals := strings.Split(string(hash), "$") - - // P, N, R, salt, scrypt derived key - if len(vals) != 5 { - return Params{}, nil, nil, ErrInvalidHash - } - - var params Params - var err error - - params.N, err = strconv.Atoi(vals[0]) - if err != nil { - return params, nil, nil, ErrInvalidHash - } - - params.R, err = strconv.Atoi(vals[1]) - if err != nil { - return params, nil, nil, ErrInvalidHash - } - - params.P, err = strconv.Atoi(vals[2]) - if err != nil { - return params, nil, nil, ErrInvalidHash - } - - salt, err := hex.DecodeString(vals[3]) - if err != nil { - return params, nil, nil, ErrInvalidHash - } - params.SaltLen = len(salt) - - dk, err := hex.DecodeString(vals[4]) - if err != nil { - return params, nil, nil, ErrInvalidHash - } - params.DKLen = len(dk) - - if err := params.Check(); err != nil { - return params, nil, nil, err - } - - return params, salt, dk, nil -} - -// Cost returns the scrypt parameters used to generate the derived key. This -// allows a package user to increase the cost (in time & resources) used as -// computational performance increases over time. -func Cost(hash []byte) (Params, error) { - params, _, _, err := decodeHash(hash) - - return params, err -} - -// Calibrate returns the hardest parameters, allowed by the given limits. -// The returned params will not use more memory than the given (MiB); -// will not take more time than the given timeout, but more than timeout/2. 
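// Calibrate runs scrypt.Key repeatedly while tuning N and p, so it is expensive; run it once at startup.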
-// - // -// The default timeout (when the timeout arg is zero) is 200ms. -// The default memMiBytes (when memMiBytes is zero) is 16MiB. -// The default parameters (when params == Params{}) are DefaultParams. -func Calibrate(timeout time.Duration, memMiBytes int, params Params) (Params, error) { - p := params - if p.N == 0 || p.R == 0 || p.P == 0 || p.SaltLen == 0 || p.DKLen == 0 { - p = DefaultParams - } else if err := p.Check(); err != nil { - return p, err - } - if timeout == 0 { - timeout = 200 * time.Millisecond - } - if memMiBytes == 0 { - memMiBytes = 16 - } - salt, err := GenerateRandomBytes(p.SaltLen) - if err != nil { - return p, err - } - password := []byte("weakpassword") - - // r is fixed to 8 and should not be used to tune the memory usage - // if the cache lines of future processors are bigger, then r should be increased - // see: https://blog.filippo.io/the-scrypt-parameters/ - p.R = 8 - - // Scrypt runs p independent mixing functions with a memory requirement of roughly - // 128 * r * N. Depending on the implementation these can be run sequentially or in parallel. - // The Go implementation runs them sequentially, therefore p can be used to adjust the execution time of scrypt. - - // we start with p=1 and only increase it if we have to - p.P = 1 - - // Memory usage is at least 128 * r * N, see - // http://blog.ircmaxell.com/2014/03/why-i-dont-recommend-scrypt.html - // or https://drupal.org/comment/4675994#comment-4675994 - - // calculate N based on the desired memory usage - memBytes := memMiBytes << 20 - p.N = 1 - for 128*int64(p.R)*int64(p.N) < int64(memBytes) { - p.N <<= 1 - } - p.N >>= 1 - - // calculate the current execution time - start := time.Now() - if _, err := scrypt.Key(password, salt, p.N, p.R, p.P, p.DKLen); err != nil { - return p, err - } - dur := time.Since(start) - - // reduce N if scrypt takes too long - for dur > timeout { - p.N >>= 1 - - start = time.Now() - if _, err := scrypt.Key(password, salt, p.N, p.R, p.P, p.DKLen); err != nil { - return p, err - } - dur = time.Since(start) - } - - // try to reach desired timeout by increasing p - // the further away we are from timeout the bigger the steps should be - for dur < timeout { - // the theoretical optimal p; cannot be used because of inaccurate measuring - optimalP := int(int64(timeout) / (int64(dur) / int64(p.P))) - - if optimalP > p.P+1 { - // use average between optimal p and current p - p.P = (p.P + optimalP) / 2 - } else { - p.P++ - } - - start = time.Now() - if _, err := scrypt.Key(password, salt, p.N, p.R, p.P, p.DKLen); err != nil { - return p, err - } - dur = time.Since(start) - } - // lower by one to get shorter duration than timeout - p.P-- - - return p, p.Check() -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a..000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
-	"fmt"
-	"log"
-	"reflect"
-	"strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
-	in := reflect.ValueOf(src)
-	if in.IsNil() {
-		return src
-	}
-	out := reflect.New(in.Type().Elem())
-	dst := out.Interface().(Message)
-	Merge(dst, src)
-	return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
-	// Merge merges src into this message.
-	// Required and optional fields that are set in src will be set to that value in dst.
-	// Elements of repeated fields will be appended.
-	//
-	// Merge may panic if called with a different argument type than the receiver.
-	Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generated Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
-	XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
-	if m, ok := dst.(Merger); ok {
-		m.Merge(src)
-		return
-	}
-
-	in := reflect.ValueOf(src)
-	out := reflect.ValueOf(dst)
-	if out.IsNil() {
-		panic("proto: nil destination")
-	}
-	if in.Type() != out.Type() {
-		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
-	}
-	if in.IsNil() {
-		return // Merge from nil src is a noop
-	}
-	if m, ok := dst.(generatedMerger); ok {
-		m.XXX_Merge(src)
-		return
-	}
-	mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
-	sprop := GetProperties(in.Type())
-	for i := 0; i < in.NumField(); i++ {
-		f := in.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
-	}
-
-	if emIn, err := extendable(in.Addr().Interface()); err == nil {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	uf := in.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return
-	}
-	uin := uf.Bytes()
-	if len(uin) > 0 {
-		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
-	}
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
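A hedged usage sketch for the Clone/Merge pair deleted above: examplepb.Note is a hypothetical protoc-gen-go generated message (any generated struct would do). Per the doc comments, scalars set in src overwrite dst, repeated fields are appended, and Clone yields an independent deep copy.

src := &examplepb.Note{Author: proto.String("alice"), Tags: []string{"go"}}
dst := &examplepb.Note{Tags: []string{"vendor"}}

proto.Merge(dst, src)
// dst.Author -> "alice"          (a scalar set in src wins)
// dst.Tags   -> ["vendor" "go"]  (repeated fields are appended)

cp := proto.Clone(src).(*examplepb.Note)
cp.Tags[0] = "changed" // deep copy: src.Tags is untouched

The per-field logic, mergeAny, described by the comment above follows.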
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
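The wire formats documented by the deleted decoders above are small enough to verify by hand. This standalone sketch (not part of the patch) reproduces the varint, zigzag, and fixed64 arithmetic without importing the proto package; 300 and -3 are arbitrary sample values. The top-level Unmarshal entry points follow below.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Varint: 7 bits per byte, least-significant group first; a set MSB
	// means "more bytes follow". 300 = 0b1_0010_1100 -> 0xAC 0x02.
	buf := []byte{0xAC, 0x02}
	var x uint64
	for shift := uint(0); ; shift += 7 {
		b := uint64(buf[0])
		buf = buf[1:]
		x |= (b & 0x7F) << shift
		if b&0x80 == 0 {
			break
		}
	}
	fmt.Println(x) // 300 (one byte encodes values < 128, two < 16384, ...)

	// Zigzag (sint32/sint64): maps small negative values to small unsigned
	// ones so they stay short on the wire.
	v := int64(-3)
	enc := uint64((v << 1) ^ (v >> 63))  // encode: -3 -> 5
	dec := int64(enc>>1) ^ -int64(enc&1) // decode: 5 -> -3
	fmt.Println(enc, dec)

	// Fixed64: plain little-endian, always eight bytes.
	le := []byte{1, 0, 0, 0, 0, 0, 0, 0}
	fmt.Println(binary.LittleEndian.Uint64(le)) // 1
}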
-func Unmarshal(buf []byte, pb Message) error {
-	pb.Reset()
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent about
-		// whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
-	enc, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
-	b := p.buf[p.index:]
-	x, y := findEndGroup(b)
-	if x < 0 {
-		return io.ErrUnexpectedEOF
-	}
-	err := Unmarshal(b[:x], pb)
-	p.index += y
-	return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
-	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(newUnmarshaler); ok {
-		err := u.XXX_Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent about
-		// whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		err := u.Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-
-	// Slow workaround for messages that aren't Unmarshalers.
-	// This includes some hand-coded .pb.go files and
-	// bootstrap protos.
-	// TODO: fix all of those and then add Unmarshal to
-	// the Message interface. Then:
-	// The cast above and code below can be deleted.
-	// The old unmarshaler can be deleted.
-	// Clients can call Unmarshal directly (can already do that, actually).
-	var info InternalMessageInfo
-	err := info.Unmarshal(pb, p.buf[p.index:])
-	p.index = len(p.buf)
-	return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index 35b882c09..000000000
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617ce..000000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
-	"bytes"
-	"log"
-	"reflect"
-	"strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
-  - Two messages are equal iff they are the same type,
-    corresponding fields are equal, unknown field sets
-    are equal, and extension sets are equal.
-  - Two set scalar fields are equal iff their values are equal.
-    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN. If the message is defined
-    in a proto3 .proto file, fields are not "set"; specifically,
-    zero length proto3 "bytes" fields are equal (nil == {}).
-  - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal. Note a "bytes" field,
-    although represented by []byte, is not a repeated field and the
-    rule for scalar fields described above applies.
-  - Two unset fields are equal.
-  - Two unknown field sets are equal if their current
-    encoded state is equal.
-  - Two extension sets are equal iff they have corresponding
-    elements that are pairwise equal.
-  - Two map fields are equal iff their lengths are the same,
-    and they contain the same set of elements. Zero-length map
-    fields are equal.
-  - Every other combination is not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
-	if a == nil || b == nil {
-		return a == b
-	}
-	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
-	if v1.Type() != v2.Type() {
-		return false
-	}
-	if v1.Kind() == reflect.Ptr {
-		if v1.IsNil() {
-			return v2.IsNil()
-		}
-		if v2.IsNil() {
-			return false
-		}
-		v1, v2 = v1.Elem(), v2.Elem()
-	}
-	if v1.Kind() != reflect.Struct {
-		return false
-	}
-	return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
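Two of the equality rules above are easy to trip over, so here is a hedged fragment; examplepb.Blob is a hypothetical proto3 message with a bytes field Data and a double field Score. The field-by-field implementation, equalStruct, follows below.

a := &examplepb.Blob{Data: nil}
b := &examplepb.Blob{Data: []byte{}}
fmt.Println(proto.Equal(a, b)) // true: zero-length proto3 bytes == nil

n1 := &examplepb.Blob{Score: math.NaN()}
n2 := &examplepb.Blob{Score: math.NaN()}
fmt.Println(proto.Equal(n1, n2)) // false: NaN != NaN, per the float rule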
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. 
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index fa88add30..000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,607 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
-// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte -} - -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
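As a minimal sketch of the accessor contract just described (assuming a hypothetical generated package pb with a message MyMessage carrying an optional string extension E_MyExtension, plus the usual fmt and log imports):

    func exampleExtensionRoundTrip(msg *pb.MyMessage) {
        // SetExtension, defined further down in this file, records desc and value.
        if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
            log.Fatal(err)
        }
        if proto.HasExtension(msg, pb.E_MyExtension) {
            v, err := proto.GetExtension(msg, pb.E_MyExtension)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(*v.(*string)) // the API type of a string extension is *string
        }
        proto.ClearExtension(msg, pb.E_MyExtension)
    }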
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined, ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr; we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non-int32 reflect.Value directly, - // set it as an int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, a pointer to a basic type, or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
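A short sketch of how this registry is used; E_MyExtension and MyMessage are hypothetical names standing in for generated code:

    // Generated .pb.go files register each descriptor once at init time.
    func init() {
        proto.RegisterExtension(E_MyExtension) // panics on a duplicate field number
    }

    // Later lookups key on the extended struct type; per the doc comment
    // above, the argument is a nil pointer to that struct type.
    func lookupMyExtensions() map[int32]*proto.ExtensionDesc {
        return proto.RegisteredExtensions((*MyMessage)(nil))
    }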
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} - -// extensionAsLegacyType converts a value in the storage type to the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts a value in the API type to the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent pointer-to-scalar types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328bb7..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and an Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any), followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example.
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, -
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
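To make the non-fatal error machinery concrete, a hedged sketch follows; it assumes, as the isNonFatal logic above suggests, that Marshal still returns the bytes for the fields that were set alongside a RequiredNotSetError (verify against the real package before relying on this):

    func marshalLenient(msg proto.Message) ([]byte, error) {
        data, err := proto.Marshal(msg)
        if _, ok := err.(*proto.RequiredNotSetError); ok {
            // Assumption: data still holds every field that was set.
            log.Println("proto: required field not set:", err)
            return data, nil
        }
        return data, err
    }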
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexicographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string.
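A minimal sketch of the deterministic mode described above, assuming the Buffer.Marshal method provided elsewhere in this package:

    func marshalDeterministic(msg proto.Message) ([]byte, error) {
        var buf proto.Buffer
        buf.SetDeterministic(true) // map entries are emitted in sorted key order
        if err := buf.Marshal(msg); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil
    }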
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
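A small sketch of SetDefaults using the Test message from the package example, whose type field declares default=77 (pb stands for the hypothetical generated package):

    func exampleSetDefaults() {
        msg := &pb.Test{Label: proto.String("x")}
        proto.SetDefaults(msg)
        fmt.Println(*msg.Type) // 77: the unset field was filled with its declared default
    }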
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
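For orientation, a sketch of how a deterministic marshaler inside this package can use the helper below to visit map entries in a stable order:

    func visitSorted(m reflect.Value) {
        keys := m.MapKeys()      // m must be a reflect.Value of map kind
        sort.Sort(mapKeys(keys)) // mapKeys picks a less func per key kind
        for _, k := range keys {
            _ = m.MapIndex(k) // entries are visited in sorted key order
        }
    }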
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that the generated code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that the generated code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that the generated code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc.
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
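Before the helpers below, a sketch of the per-item framing that unmarshalMessageSet reconstructs (package-internal, mirroring the code that follows; frameItem is a hypothetical name):

    func frameItem(id int32, msg []byte) []byte {
        b := EncodeVarint(uint64(id)<<3 | WireBytes) // field tag: number id, wire type bytes
        b = append(b, EncodeVarint(uint64(len(msg)))...) // length prefix
        return append(b, msg...) // embedded message payload
    }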
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194a..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe071..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index 79668ff5c..000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,545 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. 
- sort.Sort(prop) - - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. 
-func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa95..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
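Before the body of table_marshal.go below: both the Properties parser above and the table-driven marshaler that follows are driven entirely by the generated protobuf struct tags, whose layout is "wiretype,fieldnumber,modifiers,..." with name=, json=, enum= and a trailing def=. A minimal, self-contained sketch of reading that layout with the standard reflect package; the Example struct and its tags here are hypothetical stand-ins, not code from this patch:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// Example is a hypothetical generated message struct; its tags follow the
// "wiretype,fieldnumber,modifiers,name=..." layout that Properties.Parse
// (above) and computeMarshalFieldInfo (below) both consume.
type Example struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
	ID   *int32  `protobuf:"varint,2,req,name=id"`
}

func main() {
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		parts := strings.Split(f.Tag.Get("protobuf"), ",")
		num, _ := strconv.Atoi(parts[1]) // the field number is always the second element
		fmt.Printf("%s: wire=%s field=%d opts=%v\n", f.Name, parts[0], num, parts[2:])
	}
}

This prints "Name: wire=bytes field=1 opts=[opt name=name]" and the analogous line for ID.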
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, and computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, and returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily be initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of the encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want to crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want to crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from the type of the message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated), -// it falls back to computing the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, and returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, maps are marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at the beginning.
- if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return 
sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
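The sizer/marshaler pairs that follow all share one piece of arithmetic: a fixed-width field costs its payload plus the precomputed tag size, and a packed slice pays a single tag plus one varint length prefix for the whole payload. A self-contained sketch of that sizing rule, assuming nothing beyond the arithmetic visible in the functions below; sizeVarint here is a local stand-in for the package's SizeVarint, written out only so the sketch compiles on its own:

package main

import "fmt"

// sizeVarint counts how many base-128 digits a value needs: one byte
// per seven bits of the value.
func sizeVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sizeFixed32PackedSlice mirrors the shape of the deleted sizer of the
// same name: 4 bytes per element, plus the varint length prefix for the
// payload, plus the tag.
func sizeFixed32PackedSlice(s []uint32, tagsize int) int {
	if len(s) == 0 {
		return 0 // an empty packed field is omitted from the wire entirely
	}
	return 4*len(s) + sizeVarint(uint64(4*len(s))) + tagsize
}

func main() {
	// Three fixed32 values with a one-byte tag:
	// 12 payload bytes + 1 length byte + 1 tag byte = 14.
	fmt.Println(sizeFixed32PackedSlice([]uint32{1, 2, 3}, 1))
}

The non-packed variants instead pay the tag once per element, which is why sizeFixed32Slice below is simply (4 + tagsize) * len(s).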
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func 
sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} 
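For orientation while reading the deleted helpers above and below: every scalar sizer/encoder in this file is a thin wrapper around two primitives, the base-128 varint and, for the signed sint types, the zigzag mapping. A minimal standalone sketch of both, with illustrative names that are not part of this patch:

// A self-contained sketch of the two encodings the surrounding helpers
// implement. appendVarint is a loop form of the unrolled encoder deleted
// above; zigzag32 mirrors the inline (uint32(v)<<1)^uint32(v>>31)
// expression used by the sizeZigzag32*/appendZigzag32* functions.
package main

import "fmt"

// appendVarint encodes v as a little-endian base-128 varint: seven bits
// per byte, high bit set on every byte except the last.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// zigzag32 maps signed values onto unsigned ones so that small
// magnitudes stay short on the wire: 0->0, -1->1, 1->2, -2->3, ...
func zigzag32(v int32) uint64 {
	return uint64((uint32(v) << 1) ^ uint32(v>>31))
}

func main() {
	fmt.Printf("%x\n", appendVarint(nil, 300)) // ac02
	fmt.Println(zigzag32(-2))                  // 3
}

On top of these primitives, the NoZero variants above add only the proto3 rule that zero-valued scalar fields are omitted entirely, and the PackedSlice variants emit a single length-delimited record instead of repeating the tag before every element.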
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. 
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). 
- // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. 
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc52..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
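The Unmarshal entry point above caches per-type unmarshal tables: an atomic load serves the hot path, and construction happens at most once under a mutex. A standalone sketch of that double-checked pattern, with illustrative names:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    // lazyTable mirrors the initialized/lock arrangement of unmarshalInfo.
    type lazyTable struct {
    	initialized int32 // 0 = not ready, 1 = ready; loaded atomically on the hot path
    	lock        sync.Mutex
    	byTag       map[uint64]string
    }

    func (t *lazyTable) get() map[uint64]string {
    	if atomic.LoadInt32(&t.initialized) == 0 {
    		t.lock.Lock()
    		if t.initialized == 0 { // re-check under the lock
    			t.byTag = map[uint64]string{1: "id", 2: "name"} // expensive setup stands in here
    			atomic.StoreInt32(&t.initialized, 1)
    		}
    		t.lock.Unlock()
    	}
    	return t.byTag
    }

    func main() {
    	var t lazyTable
    	fmt.Println(t.get()[1]) // "id"; later calls skip the mutex entirely
    }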
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
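The dispatch loop above splits each field key with tag := x >> 3 and wire := int(x) & 7, because a key varint packs the field number and wire type as key = tag<<3 | wire. A tiny worked example:

    package main

    import "fmt"

    func main() {
    	key := uint64(0x1a)           // 0b00011_010, the key before a length-delimited field 3
    	fmt.Println(key>>3, key&7)    // tag 3, wire type 2
    	fmt.Println(uint64(3)<<3 | 2) // and back again: 26 (0x1a)
    }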
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
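computeUnmarshalInfo above drives everything from the generated protobuf struct tags, splitting the comma-separated tag into encoding, field number, label, and trailing options. A hypothetical mini-parser for the same layout (the tag string is an assumed example):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	tag := "varint,2,req,name=id" // layout assumed from the deleted code
    	parts := strings.Split(tag, ",")
    	fmt.Println("encoding:", parts[0]) // varint
    	fmt.Println("field #: ", parts[1]) // 2
    	fmt.Println("label:   ", parts[2]) // req
    	for _, opt := range parts[3:] {
    		if strings.HasPrefix(opt, "name=") {
    			fmt.Println("name:    ", strings.TrimPrefix(opt, "name="))
    		}
    	}
    }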
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 - - atomic.StoreInt32(&u.initialized, 1) -} - -// setTag stores the unmarshal information for the given tag. -// tag = tag # for field -// field/unmarshal = unmarshal info for that field. -// reqMask = if required, bitmask for field position in required field list. 0 otherwise. -// name = short name of the field. -func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { - i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} - n := u.typ.NumField() - if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
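The sint32/sint64 decoders that follow recover signed values with the shift trick v := int64(x>>1) ^ int64(x)<<63>>63, the inverse of ZigZag encoding, which interleaves negatives (0, -1, 1, -2 map to 0, 1, 2, 3) so small magnitudes stay small on the wire. A round-trip sketch:

    package main

    import "fmt"

    func zigzagEncode(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }
    func zigzagDecode(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 } // same trick as above

    func main() {
    	for _, v := range []int64{0, -1, 1, -2, 64} {
    		e := zigzagEncode(v)
    		fmt.Println(v, "->", e, "->", zigzagDecode(e)) // decode(encode(v)) == v
    	}
    }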
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
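All of the repeated-scalar decoders in this file also accept the packed form: wire type 2, a byte-length prefix, then the values back to back with no per-element tags. A sketch that decodes one packed varint payload (bytes chosen for illustration; it omits the bounds checks the deleted code performs, so malformed input would panic here):

    package main

    import "fmt"

    // Decodes the packed payload of e.g. a repeated int32 field; the field
    // tag and length prefix are assumed to have been consumed already.
    func main() {
    	payload := []byte{0x03, 0x8e, 0x02, 0x9e, 0xa7, 0x05}
    	var xs []uint64
    	for len(payload) > 0 {
    		var v uint64
    		var shift uint
    		for {
    			b := payload[0]
    			payload = payload[1:]
    			v |= uint64(b&0x7f) << shift // 7 payload bits per byte
    			if b < 0x80 {                // high bit clear: last byte of this value
    				break
    			}
    			shift += 7
    		}
    		xs = append(xs, v)
    	}
    	fmt.Println(xs) // [3 270 86942]
    }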
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
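makeUnmarshalMap above can treat every map entry as a small embedded message because that is exactly how maps go on the wire: the key is field 1 and the value is field 2 of the entry, which is what its x >> 3 switch distinguishes. The bytes for one entry of a hypothetical map<string, int32> m = 3; field:

    package main

    import "fmt"

    func main() {
    	entry := []byte{
    		0x0a, 0x01, 'a', // field 1, wire type 2: key "a"
    		0x10, 0x01, //      field 2, wire type 0: value 1
    	}
    	// The map field itself is length-delimited field 3 of the enclosing message.
    	field := append([]byte{0x1a, byte(len(entry))}, entry...)
    	fmt.Printf("%x\n", field) // 1a050a01611001
    }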
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
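Before the text-format writer below, a quick round trip through the 7-bits-per-byte scheme that the encodeVarint/decodeVarint pair above implements (each byte carries seven payload bits plus a continuation bit):

    package main

    import "fmt"

    // Same encoding as the deleted encodeVarint: emit the low 7 bits with
    // the continuation bit set until fewer than 8 bits remain.
    func encodeVarint(b []byte, x uint64) []byte {
    	for x >= 1<<7 {
    		b = append(b, byte(x&0x7f|0x80))
    		x >>= 7
    	}
    	return append(b, byte(x))
    }

    func main() {
    	fmt.Printf("%x\n", encodeVarint(nil, 300)) // ac02: 0b0101100 then 0b10
    }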
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
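Concretely, a hypothetical map<string, int32> counts = 1; field comes out of this rendering scheme roughly as follows in non-compact mode, one entry per key with the keys sorted (illustrative output, inferred from the writeName and map-entry code here):

    counts: <
      key: "a"
      value: 1
    >
    counts: <
      key: "b"
      value: 2
    >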
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
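The float handling that follows substitutes the text-format tokens for non-finite values instead of Go's +Inf/NaN spellings. A self-contained sketch of the same mapping:

    package main

    import (
    	"fmt"
    	"math"
    )

    // textFloat mirrors the special cases below: inf, -inf and nan tokens
    // for non-finite values, ordinary formatting otherwise.
    func textFloat(x float64) string {
    	switch {
    	case math.IsInf(x, 1):
    		return "inf"
    	case math.IsInf(x, -1):
    		return "-inf"
    	case math.IsNaN(x):
    		return "nan"
    	}
    	return fmt.Sprint(x)
    }

    func main() {
    	fmt.Println(textFloat(math.Inf(1)), textFloat(math.Inf(-1)), textFloat(math.NaN()), textFloat(1.5))
    }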
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func 
isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. 
- switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 70276e8f5..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,141 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
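Before the ptypes files below, a round-trip sketch of the text parser that ends here (UnmarshalText driving readStruct/readAny). The wrapper type used for the string case is an assumption from the sibling ptypes/wrappers package, which is not part of this hunk.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
	wrappers "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Fields may be separated by whitespace, ';' or ',' (consumeOptionalSeparator),
	// and '#' starts a line comment (skipWhitespace).
	var d durpb.Duration
	if err := proto.UnmarshalText("seconds: 3; nanos: 1 # comment", &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 3 1

	// unquoteC/unescape follow C++ tokenizer rules (octal and hex escapes),
	// and next() concatenates adjacent quoted strings.
	var s wrappers.StringValue
	if err := proto.UnmarshalText(`value: "\101" "\x42"`, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Value) // "AB"
}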
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. 
-func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { - return false - } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 78ee52334..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -package any - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
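A usage sketch for the Any helpers that end here (MarshalAny, Is, DynamicAny, UnmarshalAny, Empty), before the generated any.pb.go continues below.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
	if err != nil {
		panic(err)
	}
	// Is compares type names without unpacking the payload.
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

	// DynamicAny makes UnmarshalAny allocate the concrete type named by
	// the type URL (via Empty) instead of the caller providing it.
	var x ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &x); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", x.Message) // *duration.Duration
}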
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} - -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 493294255..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
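To make the type_url contract above concrete: the unpack helpers look only at the segment after the final '/'; the host prefix is never dereferenced. A sketch with a hand-rolled Any (the example.com prefix is arbitrary):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	anypb "github.com/golang/protobuf/ptypes/any"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	raw, err := proto.Marshal(&durpb.Duration{Seconds: 2})
	if err != nil {
		panic(err)
	}
	// Any prefix works as long as the last path segment is the fully
	// qualified message name and Value is its binary encoding.
	a := &anypb.Any{TypeUrl: "example.com/x/google.protobuf.Duration", Value: raw}
	var d durpb.Duration
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds) // 2
}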
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595da7..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
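The JSON mapping sketched in the any.proto comment above is implemented by the sibling jsonpb package (an assumption; that package is not part of this hunk). The "1.212s" value mirrors the Duration example from the comment:

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
	if err != nil {
		panic(err)
	}
	s, err := (&jsonpb.Marshaler{}).MarshalToString(a)
	if err != nil {
		panic(err)
	}
	// {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}
	fmt.Println(s)
}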
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 26d1ca2fb..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). 
-func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index 0d681ee21..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -package duration - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
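A round-trip sketch for the Duration/DurationProto helpers deleted above; the second call shows a value that validates as a proto Duration but overflows time.Duration. (Example 2's pseudo code resumes below.)

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	p := ptypes.DurationProto(90 * time.Second) // seconds:90
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err) // nil, out-of-range, or mixed-sign inputs land here
	}
	fmt.Println(d) // 1m30s

	// ~317 years: within the proto Duration range (~10,000 years) but
	// beyond time.Duration's ~290-year range, so the conversion fails.
	_, err = ptypes.Duration(&durpb.Duration{Seconds: 10000000000})
	fmt.Println(err != nil) // true
}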
-// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} - -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8da0df01a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,132 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. 
-// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index 31cd846de..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -package timestamp - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. 
In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} - -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 
0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa03..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
-// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE deleted file mode 100644 index 32017f8fa..000000000 --- a/vendor/github.com/google/go-cmp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go deleted file mode 100644 index 41bbddc61..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package cmpopts provides common options for the cmp package. -package cmpopts - -import ( - "math" - "reflect" - - "github.com/google/go-cmp/cmp" -) - -func equateAlways(_, _ interface{}) bool { return true } - -// EquateEmpty returns a Comparer option that determines all maps and slices -// with a length of zero to be equal, regardless of whether they are nil. -// -// EquateEmpty can be used in conjunction with SortSlices and SortMaps. -func EquateEmpty() cmp.Option { - return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) -} - -func isEmpty(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && - (vx.Len() == 0 && vy.Len() == 0) -} - -// EquateApprox returns a Comparer option that determines float32 or float64 -// values to be equal if they are within a relative fraction or absolute margin. -// This option is not used when either x or y is NaN or infinite. -// -// The fraction determines that the difference of two values must be within the -// smaller fraction of the two values, while the margin determines that the two -// values must be within some absolute margin. -// To express only a fraction or only a margin, use 0 for the other parameter. -// The fraction and margin must be non-negative. -// -// The mathematical expression used is equivalent to: -// |x-y| ≤ max(fraction*min(|x|, |y|), margin) -// -// EquateApprox can be used in conjunction with EquateNaNs. 
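A minimal sketch of the EquateApprox and EquateNaNs options documented above working together, assuming the vendored github.com/google/go-cmp; the slice values are illustrative:

    package main

    import (
        "fmt"
        "math"

        "github.com/google/go-cmp/cmp"
        "github.com/google/go-cmp/cmp/cmpopts"
    )

    func main() {
        x := []float64{1.0, math.NaN()}
        y := []float64{1.004, math.NaN()}

        // |1.0-1.004| = 0.004 <= max(fraction*min(|x|,|y|), margin) with
        // fraction 0 and margin 0.01, and the NaN pair is equated by
        // EquateNaNs; without both options the comparison reports unequal.
        eq := cmp.Equal(x, y, cmpopts.EquateApprox(0, 0.01), cmpopts.EquateNaNs())
        fmt.Println(eq) // true
    }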
-func EquateApprox(fraction, margin float64) cmp.Option { - if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { - panic("margin or fraction must be a non-negative number") - } - a := approximator{fraction, margin} - return cmp.Options{ - cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), - cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), - } -} - -type approximator struct{ frac, marg float64 } - -func areRealF64s(x, y float64) bool { - return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) -} -func areRealF32s(x, y float32) bool { - return areRealF64s(float64(x), float64(y)) -} -func (a approximator) compareF64(x, y float64) bool { - relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) - return math.Abs(x-y) <= math.Max(a.marg, relMarg) -} -func (a approximator) compareF32(x, y float32) bool { - return a.compareF64(float64(x), float64(y)) -} - -// EquateNaNs returns a Comparer option that determines float32 and float64 -// NaN values to be equal. -// -// EquateNaNs can be used in conjunction with EquateApprox. -func EquateNaNs() cmp.Option { - return cmp.Options{ - cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), - cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), - } -} - -func areNaNsF64s(x, y float64) bool { - return math.IsNaN(x) && math.IsNaN(y) -} -func areNaNsF32s(x, y float32) bool { - return areNaNsF64s(float64(x), float64(y)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go deleted file mode 100644 index e86554b92..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp" -) - -// IgnoreFields returns an Option that ignores exported fields of the -// given names on a single struct type. -// The struct type is specified by passing in a value of that type. -// -// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a -// specific sub-field that is embedded or nested within the parent struct. -// -// This does not handle unexported fields; use IgnoreUnexported instead. -func IgnoreFields(typ interface{}, names ...string) cmp.Option { - sf := newStructFilter(typ, names...) - return cmp.FilterPath(sf.filter, cmp.Ignore()) -} - -// IgnoreTypes returns an Option that ignores all values assignable to -// certain types, which are specified by passing in a value of each type. -func IgnoreTypes(typs ...interface{}) cmp.Option { - tf := newTypeFilter(typs...) - return cmp.FilterPath(tf.filter, cmp.Ignore()) -} - -type typeFilter []reflect.Type - -func newTypeFilter(typs ...interface{}) (tf typeFilter) { - for _, typ := range typs { - t := reflect.TypeOf(typ) - if t == nil { - // This occurs if someone tries to pass in sync.Locker(nil) - panic("cannot determine type; consider using IgnoreInterfaces") - } - tf = append(tf, t) - } - return tf -} -func (tf typeFilter) filter(p cmp.Path) bool { - if len(p) < 1 { - return false - } - t := p.Last().Type() - for _, ti := range tf { - if t.AssignableTo(ti) { - return true - } - } - return false -} - -// IgnoreInterfaces returns an Option that ignores all values or references of -// values assignable to certain interface types. 
These interfaces are specified -// by passing in an anonymous struct with the interface types embedded in it. -// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. -func IgnoreInterfaces(ifaces interface{}) cmp.Option { - tf := newIfaceFilter(ifaces) - return cmp.FilterPath(tf.filter, cmp.Ignore()) -} - -type ifaceFilter []reflect.Type - -func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) { - t := reflect.TypeOf(ifaces) - if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct { - panic("input must be an anonymous struct") - } - for i := 0; i < t.NumField(); i++ { - fi := t.Field(i) - switch { - case !fi.Anonymous: - panic("struct cannot have named fields") - case fi.Type.Kind() != reflect.Interface: - panic("embedded field must be an interface type") - case fi.Type.NumMethod() == 0: - // This matches everything; why would you ever want this? - panic("cannot ignore empty interface") - default: - tf = append(tf, fi.Type) - } - } - return tf -} -func (tf ifaceFilter) filter(p cmp.Path) bool { - if len(p) < 1 { - return false - } - t := p.Last().Type() - for _, ti := range tf { - if t.AssignableTo(ti) { - return true - } - if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) { - return true - } - } - return false -} - -// IgnoreUnexported returns an Option that only ignores the immediate unexported -// fields of a struct, including anonymous fields of unexported types. -// In particular, unexported fields within the struct's exported fields -// of struct types, including anonymous fields, will not be ignored unless the -// type of the field itself is also passed to IgnoreUnexported. -func IgnoreUnexported(typs ...interface{}) cmp.Option { - ux := newUnexportedFilter(typs...) - return cmp.FilterPath(ux.filter, cmp.Ignore()) -} - -type unexportedFilter struct{ m map[reflect.Type]bool } - -func newUnexportedFilter(typs ...interface{}) unexportedFilter { - ux := unexportedFilter{m: make(map[reflect.Type]bool)} - for _, typ := range typs { - t := reflect.TypeOf(typ) - if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - ux.m[t] = true - } - return ux -} -func (xf unexportedFilter) filter(p cmp.Path) bool { - sf, ok := p.Index(-1).(cmp.StructField) - if !ok { - return false - } - return xf.m[p.Index(-2).Type()] && !isExported(sf.Name()) -} - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go deleted file mode 100644 index da17d7469..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/internal/function" -) - -// SortSlices returns a Transformer option that sorts all []V. -// The less function must be of the form "func(T, T) bool" which is used to -// sort any slice with element type V that is assignable to T. -// -// The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// -// The less function does not have to be "total". 
That is, if !less(x, y) and -// !less(y, x) for two elements x and y, their relative order is maintained. -// -// SortSlices can be used in conjunction with EquateEmpty. -func SortSlices(less interface{}) cmp.Option { - vf := reflect.ValueOf(less) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", less)) - } - ss := sliceSorter{vf.Type().In(0), vf} - return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort)) -} - -type sliceSorter struct { - in reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (ss sliceSorter) filter(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - if !(x != nil && y != nil && vx.Type() == vy.Type()) || - !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) || - (vx.Len() <= 1 && vy.Len() <= 1) { - return false - } - // Check whether the slices are already sorted to avoid an infinite - // recursion cycle applying the same transform to itself. - ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) }) - ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) }) - return !ok1 || !ok2 -} -func (ss sliceSorter) sort(x interface{}) interface{} { - src := reflect.ValueOf(x) - dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len()) - for i := 0; i < src.Len(); i++ { - dst.Index(i).Set(src.Index(i)) - } - sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) }) - ss.checkSort(dst) - return dst.Interface() -} -func (ss sliceSorter) checkSort(v reflect.Value) { - start := -1 // Start of a sequence of equal elements. - for i := 1; i < v.Len(); i++ { - if ss.less(v, i-1, i) { - // Check that first and last elements in v[start:i] are equal. - if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { - panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) - } - start = -1 - } else if start == -1 { - start = i - } - } -} -func (ss sliceSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i), v.Index(j) - return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} - -// SortMaps returns a Transformer option that flattens map[K]V types to be a -// sorted []struct{K, V}. The less function must be of the form -// "func(T, T) bool" which is used to sort any map with key K that is -// assignable to T. -// -// Flattening the map into a slice has the property that cmp.Equal is able to -// use Comparers on K or the K.Equal method if it exists. -// -// The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// • Total: if x != y, then either less(x, y) or less(y, x) -// -// SortMaps can be used in conjunction with EquateEmpty. 
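A minimal sketch of SortSlices in use, assuming the vendored go-cmp; the less function below is deterministic, irreflexive, and transitive, as the contract above requires:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
        "github.com/google/go-cmp/cmp/cmpopts"
    )

    func main() {
        got := []int{3, 1, 2}
        want := []int{1, 2, 3}

        // The Transformer sorts both slices before comparison, so element
        // order no longer matters.
        opt := cmpopts.SortSlices(func(a, b int) bool { return a < b })
        fmt.Println(cmp.Equal(got, want, opt)) // true
        fmt.Println(cmp.Equal(got, want))      // false: order differs
    }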
-func SortMaps(less interface{}) cmp.Option { - vf := reflect.ValueOf(less) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", less)) - } - ms := mapSorter{vf.Type().In(0), vf} - return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort)) -} - -type mapSorter struct { - in reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (ms mapSorter) filter(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && - (vx.Len() != 0 || vy.Len() != 0) -} -func (ms mapSorter) sort(x interface{}) interface{} { - src := reflect.ValueOf(x) - outType := mapEntryType(src.Type()) - dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) - for i, k := range src.MapKeys() { - v := reflect.New(outType).Elem() - v.Field(0).Set(k) - v.Field(1).Set(src.MapIndex(k)) - dst.Index(i).Set(v) - } - sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) - ms.checkSort(dst) - return dst.Interface() -} -func (ms mapSorter) checkSort(v reflect.Value) { - for i := 1; i < v.Len(); i++ { - if !ms.less(v, i-1, i) { - panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) - } - } -} -func (ms mapSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - if !hasReflectStructOf { - vx, vy = vx.Elem(), vy.Elem() - } - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go deleted file mode 100644 index 839b88ca4..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !go1.8 - -package cmpopts - -import ( - "reflect" - "sort" -) - -const hasReflectStructOf = false - -func mapEntryType(reflect.Type) reflect.Type { - return reflect.TypeOf(struct{ K, V interface{} }{}) -} - -func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { - return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less}) -} -func sortSlice(slice interface{}, less func(i, j int) bool) { - sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less}) -} -func sortSliceStable(slice interface{}, less func(i, j int) bool) { - sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less}) -} - -type reflectSliceSorter struct { - slice reflect.Value - less func(i, j int) bool -} - -func (ss reflectSliceSorter) Len() int { - return ss.slice.Len() -} -func (ss reflectSliceSorter) Less(i, j int) bool { - return ss.less(i, j) -} -func (ss reflectSliceSorter) Swap(i, j int) { - vi := ss.slice.Index(i).Interface() - vj := ss.slice.Index(j).Interface() - ss.slice.Index(i).Set(reflect.ValueOf(vj)) - ss.slice.Index(j).Set(reflect.ValueOf(vi)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go deleted file mode 100644 index 8a59c0d38..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build go1.8 - -package cmpopts - -import ( - "reflect" - "sort" -) - -const hasReflectStructOf = true - -func mapEntryType(t reflect.Type) reflect.Type { - return reflect.StructOf([]reflect.StructField{ - {Name: "K", Type: t.Key()}, - {Name: "V", Type: t.Elem()}, - }) -} - -func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { - return sort.SliceIsSorted(slice, less) -} -func sortSlice(slice interface{}, less func(i, j int) bool) { - sort.Slice(slice, less) -} -func sortSliceStable(slice interface{}, less func(i, j int) bool) { - sort.SliceStable(slice, less) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go deleted file mode 100644 index 97f707983..000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmpopts - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp" -) - -// filterField returns a new Option where opt is only evaluated on paths that -// include a specific exported field on a single struct type. -// The struct type is specified by passing in a value of that type. -// -// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a -// specific sub-field that is embedded or nested within the parent struct. -func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { - // TODO: This is currently unexported over concerns of how helper filters - // can be composed together easily. - // TODO: Add tests for FilterField. - - sf := newStructFilter(typ, name) - return cmp.FilterPath(sf.filter, opt) -} - -type structFilter struct { - t reflect.Type // The root struct type to match on - ft fieldTree // Tree of fields to match on -} - -func newStructFilter(typ interface{}, names ...string) structFilter { - // TODO: Perhaps allow * as a special identifier to allow ignoring any - // number of path steps until the next field match? - // This could be useful when a concrete struct gets transformed into - // an anonymous struct where it is not possible to specify that by type, - // but the transformer happens to provide guarantees about the names of - // the transformed fields. - - t := reflect.TypeOf(typ) - if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T must be a struct", typ)) - } - var ft fieldTree - for _, name := range names { - cname, err := canonicalName(t, name) - if err != nil { - panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) - } - ft.insert(cname) - } - return structFilter{t, ft} -} - -func (sf structFilter) filter(p cmp.Path) bool { - for i, ps := range p { - if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { - return true - } - } - return false -} - -// fieldTree represents a set of dot-separated identifiers. 
-// -// For example, inserting the following selectors: -// Foo -// Foo.Bar.Baz -// Foo.Buzz -// Nuka.Cola.Quantum -// -// Results in a tree of the form: -// {sub: { -// "Foo": {ok: true, sub: { -// "Bar": {sub: { -// "Baz": {ok: true}, -// }}, -// "Buzz": {ok: true}, -// }}, -// "Nuka": {sub: { -// "Cola": {sub: { -// "Quantum": {ok: true}, -// }}, -// }}, -// }} -type fieldTree struct { - ok bool // Whether this is a specified node - sub map[string]fieldTree // The sub-tree of fields under this node -} - -// insert inserts a sequence of field accesses into the tree. -func (ft *fieldTree) insert(cname []string) { - if ft.sub == nil { - ft.sub = make(map[string]fieldTree) - } - if len(cname) == 0 { - ft.ok = true - return - } - sub := ft.sub[cname[0]] - sub.insert(cname[1:]) - ft.sub[cname[0]] = sub -} - -// matchPrefix reports whether any selector in the fieldTree matches -// the start of path p. -func (ft fieldTree) matchPrefix(p cmp.Path) bool { - for _, ps := range p { - switch ps := ps.(type) { - case cmp.StructField: - ft = ft.sub[ps.Name()] - if ft.ok { - return true - } - if len(ft.sub) == 0 { - return false - } - case cmp.Indirect: - default: - return false - } - } - return false -} - -// canonicalName returns a list of identifiers where any struct field access -// through an embedded field is expanded to include the names of the embedded -// types themselves. -// -// For example, suppose field "Foo" is not directly in the parent struct, -// but actually from an embedded struct of type "Bar". Then, the canonical name -// of "Foo" is actually "Bar.Foo". -// -// Suppose field "Foo" is not directly in the parent struct, but actually -// a field in two different embedded structs of types "Bar" and "Baz". -// Then the selector "Foo" causes a panic since it is ambiguous which one it -// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". -func canonicalName(t reflect.Type, sel string) ([]string, error) { - var name string - sel = strings.TrimPrefix(sel, ".") - if sel == "" { - return nil, fmt.Errorf("name must not be empty") - } - if i := strings.IndexByte(sel, '.'); i < 0 { - name, sel = sel, "" - } else { - name, sel = sel[:i], sel[i:] - } - - // Type must be a struct or pointer to struct. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("%v must be a struct", t) - } - - // Find the canonical name for this current field name. - // If the field exists in an embedded struct, then it will be expanded. - if !isExported(name) { - // Disallow unexported fields: - // * To discourage people from actually touching unexported fields - // * FieldByName is buggy (https://golang.org/issue/4876) - return []string{name}, fmt.Errorf("name must be exported") - } - sf, ok := t.FieldByName(name) - if !ok { - return []string{name}, fmt.Errorf("does not exist") - } - var ss []string - for i := range sf.Index { - ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) - } - if sel == "" { - return ss, nil - } - ssPost, err := canonicalName(sf.Type, sel) - return append(ss, ssPost...), err -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go deleted file mode 100644 index 7e215f220..000000000 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
- -// Package cmp determines equality of values. -// -// This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. -// -// The primary features of cmp are: -// -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. -// -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. -// -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. -package cmp - -import ( - "fmt" - "reflect" - - "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/function" - "github.com/google/go-cmp/cmp/internal/value" -) - -// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to -// the reflection package's inability to retrieve such entries. Equal will panic -// anytime it comes across a NaN key, but this behavior may change. -// -// See https://golang.org/issue/11104 for more details. - -var nothing = reflect.Value{} - -// Equal reports whether x and y are equal by recursively applying the -// following rules in the given order to x and y and all of their sub-values: -// -// • If two values are not of the same type, then they are never equal -// and the overall result is false. -// -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. -// -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. -// Otherwise, no such method exists and evaluation proceeds to the next rule. -// -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. -// Pointers are equal if the underlying values they point to are also equal. -// Interfaces are equal if their underlying concrete values are also equal. -// -// Structs are equal if all of their fields are equal. 
If a struct contains -// unexported fields, Equal panics unless the AllowUnexported option is used or -// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. -// -// Arrays, slices, and maps are equal if they are both nil or both non-nil -// with the same length and the elements at each index or key are equal. -// Note that a non-nil empty slice and a nil slice are not equal. -// To equate empty slices and maps, consider using cmpopts.EquateEmpty. -// Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. -func Equal(x, y interface{}, opts ...Option) bool { - s := newState(opts) - s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. The output string will use the "-" symbol to -// indicate elements removed from x, and the "+" symbol to indicate elements -// added to y. -// -// Do not depend on this output being stable. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - opts = Options{Options(opts), r} - eq := Equal(x, y, opts...) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d -} - -type state struct { - // These fields represent the "comparison state". - // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - reporter reporter // Optional reporter used for difference formatting - - // dynChecker triggers pseudo-random checks for option correctness. - // It is safe for statelessCompare to mutate this value. - dynChecker dynChecker - - // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options -} - -func newState(opts []Option) *state { - s := new(state) - for _, opt := range opts { - s.processOption(opt) - } - return s -} - -func (s *state) processOption(opt Option) { - switch opt := opt.(type) { - case nil: - case Options: - for _, o := range opt { - s.processOption(o) - } - case coreOption: - type filtered interface { - isFiltered() bool - } - if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { - panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) - } - s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } - case reporter: - if s.reporter != nil { - panic("difference reporter already registered") - } - s.reporter = opt - default: - panic(fmt.Sprintf("unknown option %T", opt)) - } -} - -// statelessCompare compares two values and returns the result. -// This function is stateless in that it does not alter the current result, -// or output to any registered reporters. -func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from - // when calling this function to when returning from it. 
- - oldResult, oldReporter := s.result, s.reporter - s.result = diff.Result{} // Reset result - s.reporter = nil // Remove reporter to avoid spurious printouts - s.compareAny(vx, vy) - res := s.result - s.result, s.reporter = oldResult, oldReporter - return res -} - -func (s *state) compareAny(vx, vy reflect.Value) { - // TODO: Support cyclic data structures. - - // Rule 0: Differing types are never equal. - if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), vx, vy) - return - } - if vx.Type() != vy.Type() { - s.report(false, vx, vy) // Possible for path to be empty - return - } - t := vx.Type() - if len(s.curPath) == 0 { - s.curPath.push(&pathStep{typ: t}) - defer s.curPath.pop() - } - vx, vy = s.tryExporting(vx, vy) - - // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(vx, vy, t) { - return - } - - // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(vx, vy, t) { - return - } - - // Rule 3: Recursively descend into each value's underlying kind. - switch t.Kind() { - case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), vx, vy) - return - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), vx, vy) - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), vx, vy) - return - case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), vx, vy) - return - case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), vx, vy) - return - case reflect.String: - s.report(vx.String() == vy.String(), vx, vy) - return - case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), vx, vy) - return - case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - case reflect.Ptr: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - s.curPath.push(&indirect{pathStep{t.Elem()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Interface: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - if vx.Elem().Type() != vy.Elem().Type() { - s.report(false, vx.Elem(), vy.Elem()) - return - } - s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Slice: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - fallthrough - case reflect.Array: - s.compareArray(vx, vy, t) - return - case reflect.Map: - s.compareMap(vx, vy, t) - return - case reflect.Struct: - s.compareStruct(vx, vy, t) - return - default: - panic(fmt.Sprintf("%v kind not handled", t.Kind())) - } -} - -func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { - if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { - if sf.force { - // Use unsafe pointer arithmetic to get read-write access to an - // unexported field in the struct. - vx = unsafeRetrieveField(sf.pvx, sf.field) - vy = unsafeRetrieveField(sf.pvy, sf.field) - } else { - // We are not allowed to export the value, so invalidate them - // so that tryOptions can panic later if not explicitly ignored. 
- vx = nothing - vy = nothing - } - } - return vx, vy -} - -func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { - // If there were no FilterValues, we will not detect invalid inputs, - // so manually check for them and append invalid if necessary. - // We still evaluate the options since an ignore can override invalid. - opts := s.opts - if !vx.IsValid() || !vy.IsValid() { - opts = Options{opts, invalid{}} - } - - // Evaluate all filters and apply the remaining options. - if opt := opts.filter(s, vx, vy, t); opt != nil { - opt.apply(s, vx, vy) - return true - } - return false -} - -func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { - // Check if this type even has an Equal method. - m, ok := t.MethodByName("Equal") - if !ok || !function.IsType(m.Type, function.EqualAssignable) { - return false - } - - eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, vx, vy) - return true -} - -func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{v})[0] - } - - // Run the function twice and ensure that we get the same results back. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, v) - want := f.Call([]reflect.Value{v})[0] - if got := <-c; !s.statelessCompare(got, want).Equal() { - // To avoid false-positives with non-reflexive equality operations, - // we sanity check whether a value is equal to itself. - if !s.statelessCompare(want, want).Equal() { - return want - } - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) - } - return want -} - -func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{x, y})[0].Bool() - } - - // Swapping the input arguments is sufficient to check that - // f is symmetric and deterministic. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, y, x) - want := f.Call([]reflect.Value{x, y})[0].Bool() - if got := <-c; !got.IsValid() || got.Bool() != want { - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) - } - return want -} - -func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { - var ret reflect.Value - defer func() { - recover() // Ignore panics, let the other call to f panic instead - c <- ret - }() - ret = f.Call(vs)[0] -} - -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Remove this hacky workaround. - // See https://golang.org/issue/22143 - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - return v -} - -func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { - step := &sliceIndex{pathStep{t.Elem()}, 0, 0} - s.curPath.push(step) - - // Compute an edit-script for slices vx and vy. 
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - step.xkey, step.ykey = ix, iy - return s.statelessCompare(vx.Index(ix), vy.Index(iy)) - }) - - // Report the entire slice as is if the arrays are of primitive kind, - // and the arrays are different enough. - isPrimitive := false - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - isPrimitive = true - } - if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 { - s.curPath.pop() // Pop first since we are reporting the whole slice - s.report(false, vx, vy) - return - } - - // Replay the edit-script. - var ix, iy int - for _, e := range es { - switch e { - case diff.UniqueX: - step.xkey, step.ykey = ix, -1 - s.report(false, vx.Index(ix), nothing) - ix++ - case diff.UniqueY: - step.xkey, step.ykey = -1, iy - s.report(false, nothing, vy.Index(iy)) - iy++ - default: - step.xkey, step.ykey = ix, iy - if e == diff.Identity { - s.report(true, vx.Index(ix), vy.Index(iy)) - } else { - s.compareAny(vx.Index(ix), vy.Index(iy)) - } - ix++ - iy++ - } - } - s.curPath.pop() - return -} - -func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - - // We combine and sort the two map keys so that we can perform the - // comparisons in a deterministic order. - step := &mapIndex{pathStep: pathStep{t.Elem()}} - s.curPath.push(step) - defer s.curPath.pop() - for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { - step.key = k - vvx := vx.MapIndex(k) - vvy := vy.MapIndex(k) - switch { - case vvx.IsValid() && vvy.IsValid(): - s.compareAny(vvx, vvy) - case vvx.IsValid() && !vvy.IsValid(): - s.report(false, vvx, nothing) - case !vvx.IsValid() && vvy.IsValid(): - s.report(false, nothing, vvy) - default: - // It is possible for both vvx and vvy to be invalid if the - // key contained a NaN value in it. There is no way in - // reflection to be able to retrieve these values. - // See https://golang.org/issue/11104 - panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) - } - } -} - -func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { - var vax, vay reflect.Value // Addressable versions of vx and vy - - step := &structField{} - s.curPath.push(step) - defer s.curPath.pop() - for i := 0; i < t.NumField(); i++ { - vvx := vx.Field(i) - vvy := vy.Field(i) - step.typ = t.Field(i).Type - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For unsafeRetrieveField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. - vax = makeAddressable(vx) - vay = makeAddressable(vy) - } - step.force = s.exporters[t] - step.pvx = vax - step.pvy = vay - step.field = t.Field(i) - } - s.compareAny(vvx, vvy) - } -} - -// report records the result of a single comparison. -// It also calls Report if any reporter is registered. 
-func (s *state) report(eq bool, vx, vy reflect.Value) { - if eq { - s.result.NSame++ - } else { - s.result.NDiff++ - } - if s.reporter != nil { - s.reporter.Report(vx, vy, eq, s.curPath) - } -} - -// dynChecker tracks the state needed to periodically perform checks that -// user-provided functions are symmetric and deterministic. -// The zero value is safe for immediate use. -type dynChecker struct{ curr, next int } - -// Next increments the state and reports whether a check should be performed. -// -// Checks occur every Nth function call, where N is a triangular number: -// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... -// See https://en.wikipedia.org/wiki/Triangular_number -// -// This sequence ensures that the cost of checks drops significantly as -// the number of function calls grows larger. -func (dc *dynChecker) Next() bool { - ok := dc.curr == dc.next - if ok { - dc.curr = 0 - dc.next++ - } - dc.curr++ - return ok -} - -// makeAddressable returns a value that is always addressable. -// It returns the input verbatim if it is already addressable, -// otherwise it creates a new value and returns an addressable copy. -func makeAddressable(v reflect.Value) reflect.Value { - if v.CanAddr() { - return v - } - vc := reflect.New(v.Type()).Elem() - vc.Set(v) - return vc -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go deleted file mode 100644 index 42afa4960..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !debug - -package diff - -var debug debugger - -type debugger struct{} - -func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { - return f -} -func (debugger) Update() {} -func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go deleted file mode 100644 index fd9f7f177..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file.
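As an aside before the debug build file continues below: the triangular-number pacing described in dynChecker's doc comment above can be verified with a standalone copy of the type (re-declared here because it is unexported):

```go
package main

import "fmt"

// Standalone copy of dynChecker from above, for illustration only.
type dynChecker struct{ curr, next int }

func (dc *dynChecker) Next() bool {
	ok := dc.curr == dc.next
	if ok {
		dc.curr = 0
		dc.next++
	}
	dc.curr++
	return ok
}

func main() {
	var dc dynChecker
	for i := 0; i < 20; i++ {
		if dc.Next() {
			fmt.Println(i) // prints 0 1 3 6 10 15: the triangular numbers
		}
	}
}
```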
- -// +build debug - -package diff - -import ( - "fmt" - "strings" - "sync" - "time" -) - -// The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=debug -v -// -// Example output: -// === RUN TestDifference/#34 -// ┌───────────────────────────────┐ -// │ \ · · · · · · · · · · · · · · │ -// │ · # · · · · · · · · · · · · · │ -// │ · \ · · · · · · · · · · · · · │ -// │ · · \ · · · · · · · · · · · · │ -// │ · · · X # · · · · · · · · · · │ -// │ · · · # \ · · · · · · · · · · │ -// │ · · · · · # # · · · · · · · · │ -// │ · · · · · # \ · · · · · · · · │ -// │ · · · · · · · \ · · · · · · · │ -// │ · · · · · · · · \ · · · · · · │ -// │ · · · · · · · · · \ · · · · · │ -// │ · · · · · · · · · · \ · · # · │ -// │ · · · · · · · · · · · \ # # · │ -// │ · · · · · · · · · · · # # # · │ -// │ · · · · · · · · · · # # # # · │ -// │ · · · · · · · · · # # # # # · │ -// │ · · · · · · · · · · · · · · \ │ -// └───────────────────────────────┘ -// [.Y..M.XY......YXYXY.|] -// -// The grid represents the edit-graph where the horizontal axis represents -// list X and the vertical axis represents list Y. The start of the two lists -// is the top-left, while the ends are the bottom-right. The '·' represents -// an unexplored node in the graph. The '\' indicates that the two symbols -// from list X and Y are equal. The 'X' indicates that two symbols are similar -// (but not exactly equal) to each other. The '#' indicates that the two symbols -// are different (and not similar). The algorithm traverses this graph trying to -// make the paths starting in the top-left and the bottom-right connect. -// -// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents -// the currently established path from the forward and reverse searches, -// separated by a '|' character. - -const ( - updateDelay = 100 * time.Millisecond - finishDelay = 500 * time.Millisecond - ansiTerminal = true // ANSI escape codes used to move terminal cursor -) - -var debug debugger - -type debugger struct { - sync.Mutex - p1, p2 EditScript - fwdPath, revPath *EditScript - grid []byte - lines int -} - -func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { - dbg.Lock() - dbg.fwdPath, dbg.revPath = p1, p2 - top := "┌─" + strings.Repeat("──", nx) + "┐\n" - row := "│ " + strings.Repeat("· ", nx) + "│\n" - btm := "└─" + strings.Repeat("──", nx) + "┘\n" - dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) - dbg.lines = strings.Count(dbg.String(), "\n") - fmt.Print(dbg) - - // Wrap the EqualFunc so that we can intercept each result. 
- return func(ix, iy int) (r Result) { - cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] - for i := range cell { - cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot - } - switch r = f(ix, iy); { - case r.Equal(): - cell[0] = '\\' - case r.Similar(): - cell[0] = 'X' - default: - cell[0] = '#' - } - return - } -} - -func (dbg *debugger) Update() { - dbg.print(updateDelay) -} - -func (dbg *debugger) Finish() { - dbg.print(finishDelay) - dbg.Unlock() -} - -func (dbg *debugger) String() string { - dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] - for i := len(*dbg.revPath) - 1; i >= 0; i-- { - dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) - } - return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) -} - -func (dbg *debugger) print(d time.Duration) { - if ansiTerminal { - fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor - } - fmt.Print(dbg) - time.Sleep(d) -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go deleted file mode 100644 index 260befea2..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package diff implements an algorithm for producing edit-scripts. -// The edit-script is a sequence of operations needed to transform one list -// of symbols into another (or vice-versa). The edits allowed are insertions, -// deletions, and modifications. The summation of all edits is called the -// Levenshtein distance as this problem is well-known in computer science. -// -// This package prioritizes performance over accuracy. That is, the run time -// is more important than obtaining a minimal Levenshtein distance. -package diff - -// EditType represents a single operation within an edit-script. -type EditType uint8 - -const ( - // Identity indicates that a symbol pair is identical in both list X and Y. - Identity EditType = iota - // UniqueX indicates that a symbol only exists in X and not Y. - UniqueX - // UniqueY indicates that a symbol only exists in Y and not X. - UniqueY - // Modified indicates that a symbol pair is a modification of each other. - Modified -) - -// EditScript represents the series of differences between two lists. -type EditScript []EditType - -// String returns a human-readable string representing the edit-script where -// Identity, UniqueX, UniqueY, and Modified are represented by the -// '.', 'X', 'Y', and 'M' characters, respectively. -func (es EditScript) String() string { - b := make([]byte, len(es)) - for i, e := range es { - switch e { - case Identity: - b[i] = '.' - case UniqueX: - b[i] = 'X' - case UniqueY: - b[i] = 'Y' - case Modified: - b[i] = 'M' - default: - panic("invalid edit-type") - } - } - return string(b) -} - -// stats returns a histogram of the number of each type of edit operation. -func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { - for _, e := range es { - switch e { - case Identity: - s.NI++ - case UniqueX: - s.NX++ - case UniqueY: - s.NY++ - case Modified: - s.NM++ - default: - panic("invalid edit-type") - } - } - return -} - -// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if -// lists X and Y are equal. -func (es EditScript) Dist() int { return len(es) - es.stats().NI } - -// LenX is the length of the X list. 
-func (es EditScript) LenX() int { return len(es) - es.stats().NY } - -// LenY is the length of the Y list. -func (es EditScript) LenY() int { return len(es) - es.stats().NX } - -// EqualFunc reports whether the symbols at indexes ix and iy are equal. -// When called by Difference, the index is guaranteed to be within nx and ny. -type EqualFunc func(ix int, iy int) Result - -// Result is the result of comparison. -// NSame is the number of sub-elements that are equal. -// NDiff is the number of sub-elements that are not equal. -type Result struct{ NSame, NDiff int } - -// Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NDiff == 0 } - -// Similar indicates whether two symbols are similar and may be represented -// by using the Modified type. As a special case, we consider binary comparisons -// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. -// -// The exact ratio of NSame to NDiff to determine similarity may change. -func (r Result) Similar() bool { - // Use NSame+1 to offset NSame so that binary comparisons are similar. - return r.NSame+1 >= r.NDiff -} - -// Difference reports whether two lists of lengths nx and ny are equal -// given the definition of equality provided as f. -// -// This function returns an edit-script, which is a sequence of operations -// needed to convert one list into the other. The following invariants for -// the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() -// -// This algorithm is not guaranteed to be an optimal solution (i.e., one that -// produces an edit-script with a minimal Levenshtein distance). This algorithm -// favors performance over optimality. The exact output is not guaranteed to -// be stable and may change over time. -func Difference(nx, ny int, f EqualFunc) (es EditScript) { - // This algorithm is based on traversing what is known as an "edit-graph". - // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" - // by Eugene W. Myers. Since D can be as large as N itself, this is - // effectively O(N^2). Unlike the algorithm from that paper, we are not - // interested in the optimal path, but at least some "decent" path. - // - // For example, let X and Y be lists of symbols: - // X = [A B C A B B A] - // Y = [C B A B A C] - // - // The edit-graph can be drawn as the following: - // A B C A B B A - // ┌─────────────┐ - // C │_|_|\|_|_|_|_│ 0 - // B │_|\|_|_|\|\|_│ 1 - // A │\|_|_|\|_|_|\│ 2 - // B │_|\|_|_|\|\|_│ 3 - // A │\|_|_|\|_|_|\│ 4 - // C │ | |\| | | | │ 5 - // └─────────────┘ 6 - // 0 1 2 3 4 5 6 7 - // - // List X is written along the horizontal axis, while list Y is written - // along the vertical axis. At any point on this grid, if the symbol in - // list X matches the corresponding symbol in list Y, then a '\' is drawn. - // The goal of any minimal edit-script algorithm is to find a path from the - // top-left corner to the bottom-right corner, while traveling through the - // fewest horizontal or vertical edges. - // A horizontal edge is equivalent to inserting a symbol from list X. - // A vertical edge is equivalent to inserting a symbol from list Y. - // A diagonal edge is equivalent to a matching symbol between both X and Y. 
- - // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny - // - // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y - // unless it is time for the algorithm to terminate. - fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} - revPath := path{-1, point{nx, ny}, make(EditScript, 0)} - fwdFrontier := fwdPath.point // Forward search frontier - revFrontier := revPath.point // Reverse search frontier - - // Search budget bounds the cost of searching for better paths. - // The longest sequence of non-matching symbols that can be tolerated is - // approximately the square-root of the search budget. - searchBudget := 4 * (nx + ny) // O(n) - - // The algorithm below is a greedy, meet-in-the-middle algorithm for - // computing sub-optimal edit-scripts between two lists. - // - // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is to connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. - // - // This algorithm is correct even if searching only in the forward direction - // or in the reverse direction. We do both because it is commonly the case - // that two lists differ because elements were added to the front - // or end of one of the lists. - // - // Running the tests with the "debug" build tag prints a visualization of - // the algorithm running in real-time. This is educational for understanding - // how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - for { - // Forward search from the beginning. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{fwdFrontier.X + z, fwdFrontier.Y - z} - switch { - case p.X >= revPath.X || p.Y < fwdPath.Y: - stop1 = true // Hit top-right corner - case p.Y >= revPath.Y || p.X < fwdPath.X: - stop2 = true // Hit bottom-left corner - case f(p.X, p.Y).Equal(): - // Match found, so connect the path to this point. - fwdPath.connect(p, f) - fwdPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(fwdPath.X, fwdPath.Y).Equal() { - break - } - fwdPath.append(Identity) - } - fwdFrontier = fwdPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards reverse point.
- if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { - fwdFrontier.X++ - } else { - fwdFrontier.Y++ - } - - // Reverse search from the end. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{revFrontier.X - z, revFrontier.Y + z} - switch { - case fwdPath.X >= p.X || revPath.Y < p.Y: - stop1 = true // Hit bottom-left corner - case fwdPath.Y >= p.Y || revPath.X < p.X: - stop2 = true // Hit top-right corner - case f(p.X-1, p.Y-1).Equal(): - // Match found, so connect the path to this point. - revPath.connect(p, f) - revPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(revPath.X-1, revPath.Y-1).Equal() { - break - } - revPath.append(Identity) - } - revFrontier = revPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards forward point. - if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { - revFrontier.X-- - } else { - revFrontier.Y-- - } - } - - // Join the forward and reverse paths and then append the reverse path. - fwdPath.connect(revPath.point, f) - for i := len(revPath.es) - 1; i >= 0; i-- { - t := revPath.es[i] - revPath.es = revPath.es[:i] - fwdPath.append(t) - } - debug.Finish() - return fwdPath.es -} - -type path struct { - dir int // +1 if forward, -1 if reverse - point // Leading point of the EditScript path - es EditScript -} - -// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types -// to the edit-script to connect p.point to dst. -func (p *path) connect(dst point, f EqualFunc) { - if p.dir > 0 { - // Connect in forward direction. - for dst.X > p.X && dst.Y > p.Y { - switch r := f(p.X, p.Y); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case dst.X-p.X >= dst.Y-p.Y: - p.append(UniqueX) - default: - p.append(UniqueY) - } - } - for dst.X > p.X { - p.append(UniqueX) - } - for dst.Y > p.Y { - p.append(UniqueY) - } - } else { - // Connect in reverse direction. - for p.X > dst.X && p.Y > dst.Y { - switch r := f(p.X-1, p.Y-1); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case p.Y-dst.Y >= p.X-dst.X: - p.append(UniqueY) - default: - p.append(UniqueX) - } - } - for p.X > dst.X { - p.append(UniqueX) - } - for p.Y > dst.Y { - p.append(UniqueY) - } - } -} - -func (p *path) append(t EditType) { - p.es = append(p.es, t) - switch t { - case Identity, Modified: - p.add(p.dir, p.dir) - case UniqueX: - p.add(p.dir, 0) - case UniqueY: - p.add(0, p.dir) - } - debug.Update() -} - -type point struct{ X, Y int } - -func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } - -// zigzag maps a consecutive sequence of integers to a zig-zag sequence. -// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] -func zigzag(x int) int { - if x&1 != 0 { - x = ^x - } - return x >> 1 -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go deleted file mode 100644 index 4c35ff11e..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
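Before the function package below, a standalone illustration of the EditScript bookkeeping from the diff package above; the types are re-declared here because internal packages cannot be imported, and the example script is arbitrary:

```go
package main

import "fmt"

// Re-declaration of the edit-script bookkeeping from the deleted
// internal/diff package, for illustration only.
type EditType uint8

const (
	Identity EditType = iota // '.'
	UniqueX                  // 'X'
	UniqueY                  // 'Y'
	Modified                 // 'M'
)

type EditScript []EditType

// stats tallies each edit type, mirroring EditScript.stats above.
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
	for _, e := range es {
		switch e {
		case Identity:
			s.NI++
		case UniqueX:
			s.NX++
		case UniqueY:
			s.NY++
		case Modified:
			s.NM++
		}
	}
	return
}

func main() {
	// ".YM.X": two matches, one Y-only symbol, one modification, one X-only symbol.
	es := EditScript{Identity, UniqueY, Modified, Identity, UniqueX}
	st := es.stats()
	fmt.Println(len(es)-st.NI) // Dist: 3 edits
	fmt.Println(len(es)-st.NY) // LenX: 4 symbols in list X
	fmt.Println(len(es)-st.NX) // LenY: 4 symbols in list Y
}
```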
- -// Package function identifies function types. -package function - -import "reflect" - -type funcType int - -const ( - _ funcType = iota - - ttbFunc // func(T, T) bool - tibFunc // func(T, I) bool - trFunc // func(T) R - - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool -) - -var boolType = reflect.TypeOf(true) - -// IsType reports whether the reflect.Type is of the specified function type. -func IsType(t reflect.Type, ft funcType) bool { - if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { - return false - } - ni, no := t.NumIn(), t.NumOut() - switch ft { - case ttbFunc: // func(T, T) bool - if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { - return true - } - case tibFunc: // func(T, I) bool - if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { - return true - } - case trFunc: // func(T) R - if ni == 1 && no == 1 { - return true - } - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go deleted file mode 100644 index 657e50877..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package value provides functionality for reflect.Value types. -package value - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "unicode" -) - -var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - -// Format formats the value v as a string. -// -// This is similar to fmt.Sprintf("%+v", v) except this: -// * Prints the type unless it can be elided -// * Avoids printing struct fields that are zero -// * Prints a nil-slice as being nil, not empty -// * Prints map entries in deterministic order -func Format(v reflect.Value, conf FormatConfig) string { - conf.printType = true - conf.followPointers = true - conf.realPointers = true - return formatAny(v, conf, nil) -} - -type FormatConfig struct { - UseStringer bool // Should the String method be used if available? - printType bool // Should we print the type before the value? - PrintPrimitiveType bool // Should we print the type of primitives? - followPointers bool // Should we recursively follow pointers? - realPointers bool // Should we print the real address of pointers? -} - -func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string { - // TODO: Should this be a multi-line printout in certain situations? 
- - if !v.IsValid() { - return "" - } - if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() { - if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { - return "" - } - - const stringerPrefix = "s" // Indicates that the String method was used - s := v.Interface().(fmt.Stringer).String() - return stringerPrefix + formatString(s) - } - - switch v.Kind() { - case reflect.Bool: - return formatPrimitive(v.Type(), v.Bool(), conf) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return formatPrimitive(v.Type(), v.Int(), conf) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr { - // Unnamed uints are usually bytes or words, so use hexadecimal. - return formatPrimitive(v.Type(), formatHex(v.Uint()), conf) - } - return formatPrimitive(v.Type(), v.Uint(), conf) - case reflect.Float32, reflect.Float64: - return formatPrimitive(v.Type(), v.Float(), conf) - case reflect.Complex64, reflect.Complex128: - return formatPrimitive(v.Type(), v.Complex(), conf) - case reflect.String: - return formatPrimitive(v.Type(), formatString(v.String()), conf) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return formatPointer(v, conf) - case reflect.Ptr: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("(%v)(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] || !conf.followPointers { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - return "&" + formatAny(v.Elem(), conf, visited) - case reflect.Interface: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - return formatAny(v.Elem(), conf, visited) - case reflect.Slice: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - fallthrough - case reflect.Array: - var ss []string - subConf := conf - subConf.printType = v.Type().Elem().Kind() == reflect.Interface - for i := 0; i < v.Len(); i++ { - s := formatAny(v.Index(i), subConf, visited) - ss = append(ss, s) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Map: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - - var ss []string - keyConf, valConf := conf, conf - keyConf.printType = v.Type().Key().Kind() == reflect.Interface - keyConf.followPointers = false - valConf.printType = v.Type().Elem().Kind() == reflect.Interface - for _, k := range SortKeys(v.MapKeys()) { - sk := formatAny(k, keyConf, visited) - sv := formatAny(v.MapIndex(k), valConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", sk, sv)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Struct: - var ss []string - subConf := conf - subConf.printType = true - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if isZero(vv) { - continue // Elide zero value fields - } - name := v.Type().Field(i).Name - subConf.UseStringer = conf.UseStringer - s := formatAny(vv, subConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", name, s)) - } - s := 
fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. - rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !unicode.IsPrint(r) - } - if strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string { - if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") { - return fmt.Sprintf("%v(%v)", t, v) - } - return fmt.Sprintf("%v", v) -} - -func formatPointer(v reflect.Value, conf FormatConfig) string { - p := v.Pointer() - if !conf.realPointers { - p = 0 // For deterministic printing purposes - } - s := formatHex(uint64(p)) - if conf.printType { - return fmt.Sprintf("(%v)(%s)", v.Type(), s) - } - return s -} - -func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} - -// insertPointer insert p into m, allocating m if necessary. -func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool { - if m == nil { - m = make(map[uintptr]bool) - } - m[p] = true - return m -} - -// isZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go deleted file mode 100644 index fe8aa27a0..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package value - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// SortKeys sorts a list of map keys, deduplicating keys if necessary. 
-// The type of each value must be comparable. -func SortKeys(vs []reflect.Value) []reflect.Value { - if len(vs) == 0 { - return vs - } - - // Sort the map keys. - sort.Sort(valueSorter(vs)) - - // Deduplicate keys (fails for NaNs). - vs2 := vs[:1] - for _, v := range vs[1:] { - if isLess(vs2[len(vs2)-1], v) { - vs2 = append(vs2, v) - } - } - return vs2 -} - -// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above. -type valueSorter []reflect.Value - -func (vs valueSorter) Len() int { return len(vs) } -func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } -func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } - -// isLess is a generic function for sorting arbitrary map keys. -// The inputs must be of the same type and must be comparable. -func isLess(x, y reflect.Value) bool { - switch x.Type().Kind() { - case reflect.Bool: - return !x.Bool() && y.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return x.Int() < y.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return x.Uint() < y.Uint() - case reflect.Float32, reflect.Float64: - fx, fy := x.Float(), y.Float() - return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) - case reflect.Complex64, reflect.Complex128: - cx, cy := x.Complex(), y.Complex() - rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) - if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { - return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) - } - return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) - case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: - return x.Pointer() < y.Pointer() - case reflect.String: - return x.String() < y.String() - case reflect.Array: - for i := 0; i < x.Len(); i++ { - if isLess(x.Index(i), y.Index(i)) { - return true - } - if isLess(y.Index(i), x.Index(i)) { - return false - } - } - return false - case reflect.Struct: - for i := 0; i < x.NumField(); i++ { - if isLess(x.Field(i), y.Field(i)) { - return true - } - if isLess(y.Field(i), x.Field(i)) { - return false - } - } - return false - case reflect.Interface: - vx, vy := x.Elem(), y.Elem() - if !vx.IsValid() || !vy.IsValid() { - return !vx.IsValid() && vy.IsValid() - } - tx, ty := vx.Type(), vy.Type() - if tx == ty { - return isLess(x.Elem(), y.Elem()) - } - if tx.Kind() != ty.Kind() { - return vx.Kind() < vy.Kind() - } - if tx.String() != ty.String() { - return tx.String() < ty.String() - } - if tx.PkgPath() != ty.PkgPath() { - return tx.PkgPath() < ty.PkgPath() - } - // This can happen in rare situations, so we fallback to just comparing - // the unique pointer for a reflect.Type. This guarantees deterministic - // ordering within a program, but it is obviously not stable. - return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() - default: - // Must be Func, Map, or Slice; which are not comparable. - panic(fmt.Sprintf("%T is not comparable", x.Type())) - } -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go deleted file mode 100644 index 91d4b066e..000000000 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
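Before options.go below, a sketch of the deterministic map iteration that SortKeys above provides generically via reflection; this version uses a concrete key type, since the internal value package is not importable:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Collect and sort the keys so iteration order is deterministic,
	// mirroring what value.SortKeys does for arbitrary key types.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s: %d\n", k, m[k]) // a: 1, b: 2, c: 3
	}
}
```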
- -package cmp - -import ( - "fmt" - "reflect" - "runtime" - "strings" - - "github.com/google/go-cmp/cmp/internal/function" -) - -// Option configures specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer) -// configure how equality is determined. - // - // The fundamental options may be composed with filters (FilterPath and - // FilterValues) to control the scope over which they are applied. - // - // The cmp/cmpopts package provides helper functions for creating options that - // may be used with Equal and Diff. -type Option interface { - // filter applies all filters and returns the option that remains. - // Each option may only read s.curPath and call s.callTTBFunc. - // - // An Options is returned only if multiple comparers or transformers - // can apply simultaneously and will only contain values of those types - // or sub-Options containing values of those types. - filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption -} - -// applicableOption represents the following types: - // Fundamental: ignore | invalid | *comparer | *transformer - // Grouping: Options -type applicableOption interface { - Option - - // apply executes the option, which may mutate s or panic. - apply(s *state, vx, vy reflect.Value) -} - -// coreOption represents the following types: - // Fundamental: ignore | invalid | *comparer | *transformer - // Filters: *pathFilter | *valuesFilter -type coreOption interface { - Option - isCore() -} - -type core struct{} - -func (core) isCore() {} - -// Options is a list of Option values that also satisfies the Option interface. -// Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, -// it will be implicitly expanded into a flat list. -// -// Applying a filter on an Options is equivalent to applying that same filter -// on all individual options held within. -type Options []Option - -func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { - for _, opt := range opts { - switch opt := opt.filter(s, vx, vy, t); opt.(type) { - case ignore: - return ignore{} // Only ignore can short-circuit evaluation - case invalid: - out = invalid{} // Takes precedence over comparer or transformer - case *comparer, *transformer, Options: - switch out.(type) { - case nil: - out = opt - case invalid: - // Keep invalid - case *comparer, *transformer, Options: - out = Options{out, opt} // Conflicting comparers or transformers - } - } - } - return out -} - -func (opts Options) apply(s *state, _, _ reflect.Value) { - const warning = "ambiguous set of applicable options" - const help = "consider using filters to ensure at most one Comparer or Transformer may apply" - var ss []string - for _, opt := range flattenOptions(nil, opts) { - ss = append(ss, fmt.Sprint(opt)) - } - set := strings.Join(ss, "\n\t") - panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) -} - -func (opts Options) String() string { - var ss []string - for _, opt := range opts { - ss = append(ss, fmt.Sprint(opt)) - } - return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) -} - -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option.
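A usage sketch of FilterPath as documented above, ahead of its implementation below; the Doc type and its "Metadata" field are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Illustrative type; only the Metadata field is filtered out below.
type Doc struct {
	Body     string
	Metadata string
}

func main() {
	// FilterPath evaluates the Ignore option only when the simplified
	// path (struct field accesses joined together) names Metadata.
	ignoreMeta := cmp.FilterPath(func(p cmp.Path) bool {
		return p.String() == "Metadata"
	}, cmp.Ignore())

	x := Doc{Body: "same", Metadata: "a"}
	y := Doc{Body: "same", Metadata: "b"}
	fmt.Println(cmp.Equal(x, y))             // false: Metadata differs
	fmt.Println(cmp.Equal(x, y, ignoreMeta)) // true: Metadata ignored
}
```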
-func FilterPath(f func(Path) bool, opt Option) Option { - if f == nil { - panic("invalid path filter function") - } - if opt := normalizeOption(opt); opt != nil { - return &pathFilter{fnc: f, opt: opt} - } - return nil -} - -type pathFilter struct { - core - fnc func(Path) bool - opt Option -} - -func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if f.fnc(s.curPath) { - return f.opt.filter(s, vx, vy, t) - } - return nil -} - -func (f pathFilter) String() string { - fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) - return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) -} - -// FilterValues returns a new Option where opt is only evaluated if filter f, -// which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If the type of the values is not -// assignable to T, then this filter implicitly returns false. -// -// The filter function must be -// symmetric (i.e., agnostic to the order of the inputs) and -// deterministic (i.e., produces the same result when given the same inputs). -// If T is an interface, it is possible that f is called with two values with -// different concrete types that both implement T. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. -func FilterValues(f interface{}, opt Option) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { - panic(fmt.Sprintf("invalid values filter function: %T", f)) - } - if opt := normalizeOption(opt); opt != nil { - vf := &valuesFilter{fnc: v, opt: opt} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - vf.typ = ti - } - return vf - } - return nil -} - -type valuesFilter struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool - opt Option -} - -func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return invalid{} - } - if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, vx, vy, t) - } - return nil -} - -func (f valuesFilter) String() string { - fn := getFuncName(f.fnc.Pointer()) - return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) -} - -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. -func Ignore() Option { return ignore{} } - -type ignore struct{ core } - -func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } -func (ignore) apply(_ *state, _, _ reflect.Value) { return } -func (ignore) String() string { return "Ignore()" } - -// invalid is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields. -type invalid struct{ core } - -func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } -func (invalid) apply(s *state, _, _ reflect.Value) { - const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) -} - -// Transformer returns an Option that applies a transformation function that -// converts values of a certain type into that of another. 
-// -// The transformer f must be a function "func(T) R" that converts values of -// type T to those of type R and is implicitly filtered to input values -// assignable to T. The transformer must not mutate T in any way. -// -// To help prevent some cases of infinite recursive cycles applying the -// same transform to the output of itself (e.g., in the case where the -// input and output types are the same), an implicit filter is added such that -// a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. -// -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep. If empty, an arbitrary name is used. -func Transformer(name string, f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { - panic(fmt.Sprintf("invalid transformer function: %T", f)) - } - if name == "" { - name = "λ" // Lambda-symbol as place-holder for anonymous transformer - } - if !isValid(name) { - panic(fmt.Sprintf("invalid name: %q", name)) - } - tr := &transformer{name: name, fnc: reflect.ValueOf(f)} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - tr.typ = ti - } - return tr -} - -type transformer struct { - core - name string - typ reflect.Type // T - fnc reflect.Value // func(T) R -} - -func (tr *transformer) isFiltered() bool { return tr.typ != nil } - -func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { - for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(*transform); !ok { - break // Hit most recent non-Transform step - } else if tr == t.trans { - return nil // Cannot directly use same Transform - } - } - if tr.typ == nil || t.AssignableTo(tr.typ) { - return tr - } - return nil -} - -func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - // Update path before calling the Transformer so that dynamic checks - // will use the updated path. - s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) - defer s.curPath.pop() - - vx = s.callTRFunc(tr.fnc, vx) - vy = s.callTRFunc(tr.fnc, vy) - s.compareAny(vx, vy) -} - -func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) -} - -// Comparer returns an Option that determines whether two values are equal -// to each other. -// -// The comparer f must be a function "func(T, T) bool" and is implicitly -// filtered to input values assignable to T. If T is an interface, it is -// possible that f is called with two values of different concrete types that -// both implement T. 
-// -// The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y -func Comparer(f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Equal) || v.IsNil() { - panic(fmt.Sprintf("invalid comparer function: %T", f)) - } - cm := &comparer{fnc: v} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - cm.typ = ti - } - return cm -} - -type comparer struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (cm *comparer) isFiltered() bool { return cm.typ != nil } - -func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { - if cm.typ == nil || t.AssignableTo(cm.typ) { - return cm - } - return nil -} - -func (cm *comparer) apply(s *state, vx, vy reflect.Value) { - eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, vx, vy) -} - -func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) -} - -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. -// -// Users of this option must understand that comparing on unexported fields -// from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal -// to unexpectedly change. However, it may be valid to use this option on types -// defined in an internal package where the semantic meaning of an unexported -// field is in the control of the user. -// -// For some cases, a custom Comparer should be used instead that defines -// equality as a function of the public API of a type rather than the underlying -// unexported implementation. -// -// For example, the reflect.Type documentation defines equality to be determined -// by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested -// in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: -// -// Comparer(func(x, y reflect.Type) bool { return x == y }) -// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) -// -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. -func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") - } - m := make(map[reflect.Type]bool) - for _, typ := range types { - t := reflect.TypeOf(typ) - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - m[t] = true - } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { - panic("not implemented") -} - -// reporter is an Option that configures how differences are reported. -type reporter interface { - // TODO: Not exported yet. - // - // Perhaps add PushStep and PopStep and change Report to only accept - // a PathStep instead of the full-path? Adding a PushStep and PopStep makes - // it clear that we are traversing the value tree in a depth-first-search - // manner, which has an effect on how values are printed. 
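A runnable version of the *regexp.Regexp comparison suggested in the AllowUnexported documentation above; comparing such values without a Comparer or AllowUnexported would panic on their unexported fields:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Equality defined by the public API (the source text), as the doc
	// above recommends over reaching into unexported implementation state.
	eqRegexp := cmp.Comparer(func(x, y *regexp.Regexp) bool {
		return x.String() == y.String()
	})

	a := regexp.MustCompile(`ab+c`)
	b := regexp.MustCompile(`ab+c`)
	fmt.Println(cmp.Equal(a, b, eqRegexp)) // true
}
```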
- - Option - - // Report is called for every comparison made and will be provided with - // the two values being compared, the equality result, and the - // current path in the value tree. It is possible for x or y to be an - // invalid reflect.Value if one of the values is non-existent; - // which is possible with maps and slices. - Report(x, y reflect.Value, eq bool, p Path) -} - -// normalizeOption normalizes the input options such that all Options groups -// are flattened and groups with a single element are reduced to that element. -// Only coreOptions and Options containing coreOptions are allowed. -func normalizeOption(src Option) Option { - switch opts := flattenOptions(nil, Options{src}); len(opts) { - case 0: - return nil - case 1: - return opts[0] - default: - return opts - } -} - -// flattenOptions copies all options in src to dst as a flat list. -// Only coreOptions and Options containing coreOptions are allowed. -func flattenOptions(dst, src Options) Options { - for _, opt := range src { - switch opt := opt.(type) { - case nil: - continue - case Options: - dst = flattenOptions(dst, opt) - case coreOption: - dst = append(dst, opt) - default: - panic(fmt.Sprintf("invalid option type: %T", opt)) - } - } - return dst -} - -// getFuncName returns a short function name from the pointer. -// The string parsing logic works up until Go1.9. -func getFuncName(p uintptr) string { - fnc := runtime.FuncForPC(p) - if fnc == nil { - return "" - } - name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm" - if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") { - // Strip the package name from method name. - name = strings.TrimSuffix(name, ")-fm") - name = strings.TrimSuffix(name, ")·fm") - if i := strings.LastIndexByte(name, '('); i >= 0 { - methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc" - if j := strings.LastIndexByte(methodName, '.'); j >= 0 { - methodName = methodName[j+1:] // E.g., "myfunc" - } - name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc" - } - } - if i := strings.LastIndexByte(name, '/'); i >= 0 { - // Strip the package name. - name = name[i+1:] // E.g., "mypkg.(mytype).myfunc" - } - return name -} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go deleted file mode 100644 index c08a3cf80..000000000 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -type ( - // Path is a list of PathSteps describing the sequence of operations to get - // from some root type to the current position in the value tree. - // The first Path element is always an operation-less PathStep that exists - // simply to identify the initial type. - // - // When traversing structs with embedded structs, the embedded struct will - // always be accessed as a field before traversing the fields of the - // embedded struct themselves. That is, an exported field from the - // embedded struct will never be accessed directly from the parent struct. - Path []PathStep - - // PathStep is a union-type for specific operations to traverse - // a value's tree structure. Users of this package never need to implement - // these types as values of this type will be returned by this package. 
- PathStep interface { - String() string - Type() reflect.Type // Resulting type after performing the path step - isPathStep() - } - - // SliceIndex is an index operation on a slice or array at some index Key. - SliceIndex interface { - PathStep - Key() int // May return -1 if in a split state - - // SplitKeys returns the indexes for indexing into slices in the - // x and y values, respectively. These indexes may differ due to the - // insertion or removal of an element in one of the slices, causing - // all of the indexes to be shifted. If an index is -1, then that - // indicates that the element does not exist in the associated slice. - // - // Key is guaranteed to return -1 if and only if the indexes returned - // by SplitKeys are not the same. SplitKeys will never return -1 for - // both indexes. - SplitKeys() (x int, y int) - - isSliceIndex() - } - // MapIndex is an index operation on a map at some index Key. - MapIndex interface { - PathStep - Key() reflect.Value - isMapIndex() - } - // TypeAssertion represents a type assertion on an interface. - TypeAssertion interface { - PathStep - isTypeAssertion() - } - // StructField represents a struct field access on a field called Name. - StructField interface { - PathStep - Name() string - Index() int - isStructField() - } - // Indirect represents pointer indirection on the parent type. - Indirect interface { - PathStep - isIndirect() - } - // Transform is a transformation from the parent type to the current type. - Transform interface { - PathStep - Name() string - Func() reflect.Value - - // Option returns the originally constructed Transformer option. - // The == operator can be used to detect the exact option used. - Option() Option - - isTransform() - } -) - -func (pa *Path) push(s PathStep) { - *pa = append(*pa, s) -} - -func (pa *Path) pop() { - *pa = (*pa)[:len(*pa)-1] -} - -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. -func (pa Path) Last() PathStep { - return pa.Index(-1) -} - -// Index returns the ith step in the Path and supports negative indexing. -// A negative index starts counting from the tail of the Path such that -1 -// refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. -func (pa Path) Index(i int) PathStep { - if i < 0 { - i = len(pa) + i - } - if i < 0 || i >= len(pa) { - return pathStep{} - } - return pa[i] -} - -// String returns the simplified path to a node. -// The simplified path only contains struct field accesses. -// -// For example: -// MyMap.MySlices.MyField -func (pa Path) String() string { - var ss []string - for _, s := range pa { - if _, ok := s.(*structField); ok { - ss = append(ss, s.String()) - } - } - return strings.TrimPrefix(strings.Join(ss, ""), ".") -} - -// GoString returns the path to a specific node using Go syntax. 
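For a sense of how these path steps surface in practice, a small sketch; `T` and its values are illustrative, and the exact diff text may vary between versions, but each reported difference is prefixed with a Path rendered in the GoString form described below:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type T struct{ Items []int }

func main() {
	x := T{Items: []int{1, 2, 3}}
	y := T{Items: []int{1, 5, 3}}

	// The difference at index 1 is reported under a path such as
	// "{main.T}.Items[1]": a root step, a StructField, and a SliceIndex.
	fmt.Println(cmp.Diff(x, y))
}
```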
-// -// For example: -// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField -func (pa Path) GoString() string { - var ssPre, ssPost []string - var numIndirect int - for i, s := range pa { - var nextStep PathStep - if i+1 < len(pa) { - nextStep = pa[i+1] - } - switch s := s.(type) { - case *indirect: - numIndirect++ - pPre, pPost := "(", ")" - switch nextStep.(type) { - case *indirect: - continue // Next step is indirection, so let them batch up - case *structField: - numIndirect-- // Automatic indirection on struct fields - case nil: - pPre, pPost = "", "" // Last step; no need for parenthesis - } - if numIndirect > 0 { - ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) - ssPost = append(ssPost, pPost) - } - numIndirect = 0 - continue - case *transform: - ssPre = append(ssPre, s.trans.name+"(") - ssPost = append(ssPost, ")") - continue - case *typeAssertion: - // As a special-case, elide type assertions on anonymous types - // since they are typically generated dynamically and can be very - // verbose. For example, some transforms return interface{} because - // of Go's lack of generics, but typically take in and return the - // exact same concrete type. - if s.Type().PkgPath() == "" { - continue - } - } - ssPost = append(ssPost, s.String()) - } - for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { - ssPre[i], ssPre[j] = ssPre[j], ssPre[i] - } - return strings.Join(ssPre, "") + strings.Join(ssPost, "") -} - -type ( - pathStep struct { - typ reflect.Type - } - - sliceIndex struct { - pathStep - xkey, ykey int - } - mapIndex struct { - pathStep - key reflect.Value - } - typeAssertion struct { - pathStep - } - structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - force bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values - field reflect.StructField // Field information - } - indirect struct { - pathStep - } - transform struct { - pathStep - trans *transformer - } -) - -func (ps pathStep) Type() reflect.Type { return ps.typ } -func (ps pathStep) String() string { - if ps.typ == nil { - return "" - } - s := ps.typ.String() - if s == "" || strings.ContainsAny(s, "{}\n") { - return "root" // Type too simple or complex to print - } - return fmt.Sprintf("{%s}", s) -} - -func (si sliceIndex) String() string { - switch { - case si.xkey == si.ykey: - return fmt.Sprintf("[%d]", si.xkey) - case si.ykey == -1: - // [5->?] 
means "I don't know where X[5] went" - return fmt.Sprintf("[%d->?]", si.xkey) - case si.xkey == -1: - // [?->3] means "I don't know where Y[3] came from" - return fmt.Sprintf("[?->%d]", si.ykey) - default: - // [5->3] means "X[5] moved to Y[3]" - return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) - } -} -func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } -func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } -func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } -func (in indirect) String() string { return "*" } -func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } - -func (si sliceIndex) Key() int { - if si.xkey != si.ykey { - return -1 - } - return si.xkey -} -func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } -func (mi mapIndex) Key() reflect.Value { return mi.key } -func (sf structField) Name() string { return sf.name } -func (sf structField) Index() int { return sf.idx } -func (tf transform) Name() string { return tf.trans.name } -func (tf transform) Func() reflect.Value { return tf.trans.fnc } -func (tf transform) Option() Option { return tf.trans } - -func (pathStep) isPathStep() {} -func (sliceIndex) isSliceIndex() {} -func (mapIndex) isMapIndex() {} -func (typeAssertion) isTypeAssertion() {} -func (structField) isStructField() {} -func (indirect) isIndirect() {} -func (transform) isTransform() {} - -var ( - _ SliceIndex = sliceIndex{} - _ MapIndex = mapIndex{} - _ TypeAssertion = typeAssertion{} - _ StructField = structField{} - _ Indirect = indirect{} - _ Transform = transform{} - - _ PathStep = sliceIndex{} - _ PathStep = mapIndex{} - _ PathStep = typeAssertion{} - _ PathStep = structField{} - _ PathStep = indirect{} - _ PathStep = transform{} -) - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} - -// isValid reports whether the identifier is valid. -// Empty and underscore-only strings are not valid. -func isValid(id string) bool { - ok := id != "" && id != "_" - for j, c := range id { - ok = ok && (j > 0 || !unicode.IsDigit(c)) - ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) - } - return ok -} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go deleted file mode 100644 index 20e9f18e0..000000000 --- a/vendor/github.com/google/go-cmp/cmp/reporter.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/value" -) - -type defaultReporter struct { - Option - diffs []string // List of differences, possibly truncated - ndiffs int // Total number of differences - nbytes int // Number of bytes in diffs - nlines int // Number of lines in diffs -} - -var _ reporter = (*defaultReporter)(nil) - -func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { - if eq { - return // Ignore equal results - } - const maxBytes = 4096 - const maxLines = 256 - r.ndiffs++ - if r.nbytes < maxBytes && r.nlines < maxLines { - sx := value.Format(x, value.FormatConfig{UseStringer: true}) - sy := value.Format(y, value.FormatConfig{UseStringer: true}) - if sx == sy { - // Unhelpful output, so use more exact formatting. 
- sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true}) - sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true}) - } - s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy) - r.diffs = append(r.diffs, s) - r.nbytes += len(s) - r.nlines += strings.Count(s, "\n") - } -} - -func (r *defaultReporter) String() string { - s := strings.Join(r.diffs, "") - if r.ndiffs == len(r.diffs) { - return s - } - return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs)) -} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go deleted file mode 100644 index d1518eb3a..000000000 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build purego appengine js - -package cmp - -import "reflect" - -const supportAllowUnexported = false - -func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value { - panic("unsafeRetrieveField is not implemented") -} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go deleted file mode 100644 index 579b65507..000000000 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !purego,!appengine,!js - -package cmp - -import ( - "reflect" - "unsafe" -) - -const supportAllowUnexported = true - -// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct -// such that the value has read-write permissions. -// -// The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() -} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4dfb..000000000 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. 
"Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. 
- - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. 
However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 5673773b2..000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,161 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element - if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. 
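A minimal usage sketch for the cache above (the keys, values, and capacity of 2 are illustrative; note the type is documented as not thread-safe):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires whenever an entry is dropped, whether
	// by capacity pressure, Remove, or Purge.
	onEvict := func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // exceeds the capacity of 2, evicting the oldest key "a"

	if _, ok := c.Get("a"); !ok {
		fmt.Println("a was evicted; cache holds", c.Len(), "entries")
	}
}
```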
-func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
-	ent := c.evictList.Back()
-	if ent != nil {
-		c.removeElement(ent)
-		kv := ent.Value.(*entry)
-		return kv.key, kv.value, true
-	}
-	return nil, nil, false
-}
-
-// GetOldest returns the oldest entry.
-func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
-	ent := c.evictList.Back()
-	if ent != nil {
-		kv := ent.Value.(*entry)
-		return kv.key, kv.value, true
-	}
-	return nil, nil, false
-}
-
-// Keys returns a slice of the keys in the cache, from oldest to newest.
-func (c *LRU) Keys() []interface{} {
-	keys := make([]interface{}, len(c.items))
-	i := 0
-	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
-		keys[i] = ent.Value.(*entry).key
-		i++
-	}
-	return keys
-}
-
-// Len returns the number of items in the cache.
-func (c *LRU) Len() int {
-	return c.evictList.Len()
-}
-
-// removeOldest removes the oldest item from the cache.
-func (c *LRU) removeOldest() {
-	ent := c.evictList.Back()
-	if ent != nil {
-		c.removeElement(ent)
-	}
-}
-
-// removeElement is used to remove a given list element from the cache.
-func (c *LRU) removeElement(e *list.Element) {
-	c.evictList.Remove(e)
-	kv := e.Value.(*entry)
-	delete(c.items, kv.key)
-	if c.onEvict != nil {
-		c.onEvict(kv.key, kv.value)
-	}
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
deleted file mode 100644
index 74c707744..000000000
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package simplelru
-
-// LRUCache is the interface for a simple LRU cache.
-type LRUCache interface {
-	// Adds a value to the cache, returns true if an eviction occurred and
-	// updates the "recently used"-ness of the key.
-	Add(key, value interface{}) bool
-
-	// Returns key's value from the cache and
-	// updates the "recently used"-ness of the key. #value, isFound
-	Get(key interface{}) (value interface{}, ok bool)
-
-	// Checks if a key exists in the cache without updating the recent-ness.
-	Contains(key interface{}) (ok bool)
-
-	// Returns key's value without updating the "recently used"-ness of the key.
-	Peek(key interface{}) (value interface{}, ok bool)
-
-	// Removes a key from the cache.
-	Remove(key interface{}) bool
-
-	// Removes the oldest entry from the cache.
-	RemoveOldest() (interface{}, interface{}, bool)
-
-	// Returns the oldest entry from the cache. #key, value, isFound
-	GetOldest() (interface{}, interface{}, bool)
-
-	// Returns a slice of the keys in the cache, from oldest to newest.
-	Keys() []interface{}
-
-	// Returns the number of items in the cache.
-	Len() int
-
-	// Clears all cache entries.
-	Purge()
-}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
deleted file mode 100644
index 5f0d1fb6a..000000000
--- a/vendor/github.com/inconshreveable/mousetrap/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2014 Alan Shreve
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
deleted file mode 100644
index 7a950d177..000000000
--- a/vendor/github.com/inconshreveable/mousetrap/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# mousetrap
-
-mousetrap is a tiny library that answers a single question.
-
-On a Windows machine, was the process invoked by someone double-clicking on
-the executable file while browsing in explorer?
-
-### Motivation
-
-Windows developers unfamiliar with command line tools will often "double-click"
-the executable for a tool. Because most CLI tools print the help and then exit
-when invoked without arguments, this is often very frustrating for those users.
-
-mousetrap provides a way to detect these invocations so that you can provide
-more helpful behavior and instructions on how to run the CLI tool. To see what
-this looks like, both from an organizational and a technical perspective, see
-https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
-
-### The interface
-
-The library exposes a single interface:
-
-    func StartedByExplorer() (bool)
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
deleted file mode 100644
index 9d2d8a4ba..000000000
--- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !windows
-
-package mousetrap
-
-// StartedByExplorer returns true if the program was invoked by the user
-// double-clicking on the executable from explorer.exe
-//
-// It is conservative and returns false if any of the internal calls fail.
-// It does not guarantee that the program was run from a terminal. It can only tell you
-// whether it was launched from explorer.exe
-//
-// On non-Windows platforms, it always returns false.
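A sketch of the intended call site, per the README above (the message and exit behavior are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// When launched by a double-click, the console window closes as soon
	// as the process exits, so pause long enough for the hint to be read.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}
	fmt.Println("running from a terminal (or a non-Windows platform)")
}
```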
-func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE deleted file mode 100644 index ade9307b3..000000000 --- a/vendor/github.com/juju/ratelimit/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. 
- - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md deleted file mode 100644 index a0fdfe2b1..000000000 --- a/vendor/github.com/juju/ratelimit/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# ratelimit --- - import "github.com/juju/ratelimit" - -The ratelimit package provides an efficient token bucket implementation. See -http://en.wikipedia.org/wiki/Token_bucket. - -## Usage - -#### func Reader - -```go -func Reader(r io.Reader, bucket *Bucket) io.Reader -``` -Reader returns a reader that is rate limited by the given token bucket. Each -token in the bucket represents one byte. - -#### func Writer - -```go -func Writer(w io.Writer, bucket *Bucket) io.Writer -``` -Writer returns a writer that is rate limited by the given token bucket. Each -token in the bucket represents one byte. - -#### type Bucket - -```go -type Bucket struct { -} -``` - -Bucket represents a token bucket that fills at a predetermined rate. Methods on -Bucket may be called concurrently. - -#### func NewBucket - -```go -func NewBucket(fillInterval time.Duration, capacity int64) *Bucket -``` -NewBucket returns a new token bucket that fills at the rate of one token every -fillInterval, up to the given maximum capacity. Both arguments must be positive. -The bucket is initially full. - -#### func NewBucketWithQuantum - -```go -func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket -``` -NewBucketWithQuantum is similar to NewBucket, but allows the specification of -the quantum size - quantum tokens are added every fillInterval. - -#### func NewBucketWithRate - -```go -func NewBucketWithRate(rate float64, capacity int64) *Bucket -``` -NewBucketWithRate returns a token bucket that fills the bucket at the rate of -rate tokens per second up to the given maximum capacity. Because of limited -clock resolution, at high rates, the actual rate may be up to 1% different from -the specified rate. - -#### func (*Bucket) Rate - -```go -func (tb *Bucket) Rate() float64 -``` -Rate returns the fill rate of the bucket, in tokens per second. - -#### func (*Bucket) Take - -```go -func (tb *Bucket) Take(count int64) time.Duration -``` -Take takes count tokens from the bucket without blocking. It returns the time -that the caller should wait until the tokens are actually available. - -Note that if the request is irrevocable - there is no way to return tokens to -the bucket once this method commits us to taking them. - -#### func (*Bucket) TakeAvailable - -```go -func (tb *Bucket) TakeAvailable(count int64) int64 -``` -TakeAvailable takes up to count immediately available tokens from the bucket. 
It -returns the number of tokens removed, or zero if there are no available tokens. -It does not block. - -#### func (*Bucket) TakeMaxDuration - -```go -func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) -``` -TakeMaxDuration is like Take, except that it will only take tokens from the -bucket if the wait time for the tokens is no greater than maxWait. - -If it would take longer than maxWait for the tokens to become available, it does -nothing and reports false, otherwise it returns the time that the caller should -wait until the tokens are actually available, and reports true. - -#### func (*Bucket) Wait - -```go -func (tb *Bucket) Wait(count int64) -``` -Wait takes count tokens from the bucket, waiting until they are available. - -#### func (*Bucket) WaitMaxDuration - -```go -func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool -``` -WaitMaxDuration is like Wait except that it will only take tokens from the -bucket if it needs to wait for no greater than maxWait. It reports whether any -tokens have been removed from the bucket If no tokens have been removed, it -returns immediately. diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go deleted file mode 100644 index bd9ef1038..000000000 --- a/vendor/github.com/juju/ratelimit/ratelimit.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3 with static-linking exception. -// See LICENCE file for details. - -// Package ratelimit provides an efficient token bucket implementation -// that can be used to limit the rate of arbitrary things. -// See http://en.wikipedia.org/wiki/Token_bucket. -package ratelimit - -import ( - "math" - "strconv" - "sync" - "time" -) - -// The algorithm that this implementation uses does computational work -// only when tokens are removed from the bucket, and that work completes -// in short, bounded-constant time (Bucket.Wait benchmarks at 175ns on -// my laptop). -// -// Time is measured in equal measured ticks, a given interval -// (fillInterval) apart. On each tick a number of tokens (quantum) are -// added to the bucket. -// -// When any of the methods are called the bucket updates the number of -// tokens that are in the bucket, and it records the current tick -// number too. Note that it doesn't record the current time - by -// keeping things in units of whole ticks, it's easy to dish out tokens -// at exactly the right intervals as measured from the start time. -// -// This allows us to calculate the number of tokens that will be -// available at some time in the future with a few simple arithmetic -// operations. -// -// The main reason for being able to transfer multiple tokens on each tick -// is so that we can represent rates greater than 1e9 (the resolution of the Go -// time package) tokens per second, but it's also useful because -// it means we can easily represent situations like "a person gets -// five tokens an hour, replenished on the hour". - -// Bucket represents a token bucket that fills at a predetermined rate. -// Methods on Bucket may be called concurrently. -type Bucket struct { - clock Clock - - // startTime holds the moment when the bucket was - // first created and ticks began. - startTime time.Time - - // capacity holds the overall capacity of the bucket. - capacity int64 - - // quantum holds how many tokens are added on - // each tick. - quantum int64 - - // fillInterval holds the interval between each tick. 
-	fillInterval time.Duration
-
-	// mu guards the fields below it.
-	mu sync.Mutex
-
-	// availableTokens holds the number of available
-	// tokens as of the associated latestTick.
-	// It will be negative when there are consumers
-	// waiting for tokens.
-	availableTokens int64
-
-	// latestTick holds the latest tick for which
-	// we know the number of tokens in the bucket.
-	latestTick int64
-}
-
-// NewBucket returns a new token bucket that fills at the
-// rate of one token every fillInterval, up to the given
-// maximum capacity. Both arguments must be
-// positive. The bucket is initially full.
-func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
-	return NewBucketWithClock(fillInterval, capacity, nil)
-}
-
-// NewBucketWithClock is identical to NewBucket but injects a testable clock
-// interface.
-func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
-	return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
-}
-
-// rateMargin specifies the allowed variance of actual
-// rate from specified rate. 1% seems reasonable.
-const rateMargin = 0.01
-
-// NewBucketWithRate returns a token bucket that fills the bucket
-// at the rate of rate tokens per second up to the given
-// maximum capacity. Because of limited clock resolution,
-// at high rates, the actual rate may be up to 1% different from the
-// specified rate.
-func NewBucketWithRate(rate float64, capacity int64) *Bucket {
-	return NewBucketWithRateAndClock(rate, capacity, nil)
-}
-
-// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
-// testable clock interface.
-func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
-	// Use the same bucket each time through the loop
-	// to save allocations.
-	tb := NewBucketWithQuantumAndClock(1, capacity, 1, clock)
-	for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
-		fillInterval := time.Duration(1e9 * float64(quantum) / rate)
-		if fillInterval <= 0 {
-			continue
-		}
-		tb.fillInterval = fillInterval
-		tb.quantum = quantum
-		if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
-			return tb
-		}
-	}
-	panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
-}
-
-// nextQuantum returns the next quantum to try after q.
-// We grow the quantum exponentially, but slowly, so we
-// get a good fit in the lower numbers.
-func nextQuantum(q int64) int64 {
-	q1 := q * 11 / 10
-	if q1 == q {
-		q1++
-	}
-	return q1
-}
-
-// NewBucketWithQuantum is similar to NewBucket, but allows
-// the specification of the quantum size - quantum tokens
-// are added every fillInterval.
-func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
-	return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, nil)
-}
-
-// NewBucketWithQuantumAndClock is like NewBucketWithQuantum, but
-// also has a clock argument that allows clients to fake the passing
-// of time. If clock is nil, the system clock will be used.
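Since every constructor funnels into NewBucketWithQuantumAndClock, a fake clock makes the refill arithmetic testable without real sleeps. A sketch under that assumption (`fakeClock` and the test values are hypothetical; `Clock` is the interface defined near the end of this file):

```go
package ratelimit_test

import (
	"testing"
	"time"

	"github.com/juju/ratelimit"
)

// fakeClock is a hypothetical manual clock satisfying ratelimit.Clock.
type fakeClock struct{ now time.Time }

func (c *fakeClock) Now() time.Time        { return c.now }
func (c *fakeClock) Sleep(d time.Duration) { c.now = c.now.Add(d) }

func TestRefill(t *testing.T) {
	clock := &fakeClock{now: time.Unix(0, 0)}
	// One token per 100ms tick, capacity 10, quantum 1.
	tb := ratelimit.NewBucketWithQuantumAndClock(100*time.Millisecond, 10, 1, clock)

	if got := tb.TakeAvailable(10); got != 10 {
		t.Fatalf("want a full bucket, got %d tokens", got)
	}
	clock.now = clock.now.Add(300 * time.Millisecond) // three ticks elapse
	if got := tb.TakeAvailable(10); got != 3 {
		t.Fatalf("want 3 refilled tokens, got %d", got)
	}
}
```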
-func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
-	if clock == nil {
-		clock = realClock{}
-	}
-	if fillInterval <= 0 {
-		panic("token bucket fill interval is not > 0")
-	}
-	if capacity <= 0 {
-		panic("token bucket capacity is not > 0")
-	}
-	if quantum <= 0 {
-		panic("token bucket quantum is not > 0")
-	}
-	return &Bucket{
-		clock:           clock,
-		startTime:       clock.Now(),
-		latestTick:      0,
-		fillInterval:    fillInterval,
-		capacity:        capacity,
-		quantum:         quantum,
-		availableTokens: capacity,
-	}
-}
-
-// Wait takes count tokens from the bucket, waiting until they are
-// available.
-func (tb *Bucket) Wait(count int64) {
-	if d := tb.Take(count); d > 0 {
-		tb.clock.Sleep(d)
-	}
-}
-
-// WaitMaxDuration is like Wait except that it will
-// only take tokens from the bucket if it needs to wait
-// for no greater than maxWait. It reports whether
-// any tokens have been removed from the bucket.
-// If no tokens have been removed, it returns immediately.
-func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
-	d, ok := tb.TakeMaxDuration(count, maxWait)
-	if d > 0 {
-		tb.clock.Sleep(d)
-	}
-	return ok
-}
-
-const infinityDuration time.Duration = 0x7fffffffffffffff
-
-// Take takes count tokens from the bucket without blocking. It returns
-// the time that the caller should wait until the tokens are actually
-// available.
-//
-// Note that the request is irrevocable - there is no way to return
-// tokens to the bucket once this method commits us to taking them.
-func (tb *Bucket) Take(count int64) time.Duration {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
-	return d
-}
-
-// TakeMaxDuration is like Take, except that
-// it will only take tokens from the bucket if the wait
-// time for the tokens is no greater than maxWait.
-//
-// If it would take longer than maxWait for the tokens
-// to become available, it does nothing and reports false;
-// otherwise it returns the time that the caller should
-// wait until the tokens are actually available, and reports
-// true.
-func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	return tb.take(tb.clock.Now(), count, maxWait)
-}
-
-// TakeAvailable takes up to count immediately available tokens from the
-// bucket. It returns the number of tokens removed, or zero if there are
-// no available tokens. It does not block.
-func (tb *Bucket) TakeAvailable(count int64) int64 {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	return tb.takeAvailable(tb.clock.Now(), count)
-}
-
-// takeAvailable is the internal version of TakeAvailable - it takes the
-// current time as an argument to enable easy testing.
-func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
-	if count <= 0 {
-		return 0
-	}
-	tb.adjustavailableTokens(tb.currentTick(now))
-	if tb.availableTokens <= 0 {
-		return 0
-	}
-	if count > tb.availableTokens {
-		count = tb.availableTokens
-	}
-	tb.availableTokens -= count
-	return count
-}
-
-// Available returns the number of available tokens. It will be negative
-// when there are consumers waiting for tokens. Note that if this
-// returns greater than zero, it does not guarantee that calls that take
-// tokens from the bucket will succeed, as the number of available
-// tokens could have changed in the meantime. This method is intended
-// primarily for metrics reporting and debugging.
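To make the contract concrete: Take always commits to the tokens and returns a sleep hint, while TakeMaxDuration declines outright when that hint would exceed the bound. A usage sketch, with arbitrary rate and bound:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// Roughly 5 tokens per second, with a burst capacity of 5.
	tb := ratelimit.NewBucketWithRate(5, 5)

	for i := 0; i < 10; i++ {
		// Decline any request whose token is more than a second away.
		d, ok := tb.TakeMaxDuration(1, time.Second)
		if !ok {
			fmt.Println("request", i, "declined")
			continue
		}
		time.Sleep(d) // d is zero while burst capacity lasts
		fmt.Println("request", i, "allowed")
	}
}
```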
-func (tb *Bucket) Available() int64 {
-	return tb.available(tb.clock.Now())
-}
-
-// available is the internal version of Available - it takes the current time as
-// an argument to enable easy testing.
-func (tb *Bucket) available(now time.Time) int64 {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	tb.adjustavailableTokens(tb.currentTick(now))
-	return tb.availableTokens
-}
-
-// Capacity returns the capacity that the bucket was created with.
-func (tb *Bucket) Capacity() int64 {
-	return tb.capacity
-}
-
-// Rate returns the fill rate of the bucket, in tokens per second.
-func (tb *Bucket) Rate() float64 {
-	return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
-}
-
-// take is the internal version of Take - it takes the current time as
-// an argument to enable easy testing.
-func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
-	if count <= 0 {
-		return 0, true
-	}
-
-	tick := tb.currentTick(now)
-	tb.adjustavailableTokens(tick)
-	avail := tb.availableTokens - count
-	if avail >= 0 {
-		tb.availableTokens = avail
-		return 0, true
-	}
-	// Round up the missing tokens to the nearest multiple
-	// of quantum - the tokens won't be available until
-	// that tick.
-
-	// endTick holds the tick when all the requested tokens will
-	// become available.
-	endTick := tick + (-avail+tb.quantum-1)/tb.quantum
-	endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
-	waitTime := endTime.Sub(now)
-	if waitTime > maxWait {
-		return 0, false
-	}
-	tb.availableTokens = avail
-	return waitTime, true
-}
-
-// currentTick returns the current time tick, measured
-// from tb.startTime.
-func (tb *Bucket) currentTick(now time.Time) int64 {
-	return int64(now.Sub(tb.startTime) / tb.fillInterval)
-}
-
-// adjustavailableTokens adjusts the current number of tokens
-// available in the bucket at the given time, which must
-// be in the future (positive) with respect to tb.latestTick.
-func (tb *Bucket) adjustavailableTokens(tick int64) {
-	if tb.availableTokens >= tb.capacity {
-		return
-	}
-	tb.availableTokens += (tick - tb.latestTick) * tb.quantum
-	if tb.availableTokens > tb.capacity {
-		tb.availableTokens = tb.capacity
-	}
-	tb.latestTick = tick
-	return
-}
-
-// Clock represents the passage of time in a way that
-// can be faked out for tests.
-type Clock interface {
-	// Now returns the current time.
-	Now() time.Time
-	// Sleep sleeps for at least the given duration.
-	Sleep(d time.Duration)
-}
-
-// realClock implements Clock in terms of standard time functions.
-type realClock struct{}
-
-// Now implements Clock.Now by calling time.Now.
-func (realClock) Now() time.Time {
-	return time.Now()
-}
-
-// Sleep implements Clock.Sleep by calling time.Sleep.
-func (realClock) Sleep(d time.Duration) {
-	time.Sleep(d)
-}
diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go
deleted file mode 100644
index 6403bf78d..000000000
--- a/vendor/github.com/juju/ratelimit/reader.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3 with static-linking exception.
-// See LICENCE file for details.
-
-package ratelimit
-
-import "io"
-
-type reader struct {
-	r      io.Reader
-	bucket *Bucket
-}
-
-// Reader returns a reader that is rate limited by
-// the given token bucket. Each token in the bucket
-// represents one byte.
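Because each token stands for one byte, these wrappers drop into any io pipeline. For example, capping a copy at roughly 1 MiB/s (the rate and the stdin/stdout endpoints here are arbitrary):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per byte: ~1 MiB/s with a 1 MiB burst.
	bucket := ratelimit.NewBucketWithRate(1<<20, 1<<20)

	// Copy stdin to stdout at no more than the bucket's rate.
	if _, err := io.Copy(os.Stdout, ratelimit.Reader(os.Stdin, bucket)); err != nil {
		log.Fatal(err)
	}
}
```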
-func Reader(r io.Reader, bucket *Bucket) io.Reader {
-	return &reader{
-		r:      r,
-		bucket: bucket,
-	}
-}
-
-func (r *reader) Read(buf []byte) (int, error) {
-	n, err := r.r.Read(buf)
-	if n <= 0 {
-		return n, err
-	}
-	r.bucket.Wait(int64(n))
-	return n, err
-}
-
-type writer struct {
-	w      io.Writer
-	bucket *Bucket
-}
-
-// Writer returns a writer that is rate limited by
-// the given token bucket. Each token in the bucket
-// represents one byte.
-func Writer(w io.Writer, bucket *Bucket) io.Writer {
-	return &writer{
-		w:      w,
-		bucket: bucket,
-	}
-}
-
-func (w *writer) Write(buf []byte) (int, error) {
-	w.bucket.Wait(int64(len(buf)))
-	return w.w.Write(buf)
-}
diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE
deleted file mode 100644
index 744875676..000000000
--- a/vendor/github.com/kr/fs/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme
deleted file mode 100644
index c95e13fc8..000000000
--- a/vendor/github.com/kr/fs/Readme
+++ /dev/null
@@ -1,3 +0,0 @@
-Filesystem Package
-
-http://godoc.org/github.com/kr/fs
diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go
deleted file mode 100644
index f1c4805fb..000000000
--- a/vendor/github.com/kr/fs/filesystem.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package fs
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-// FileSystem defines the methods of an abstract filesystem.
-type FileSystem interface {
-
-	// ReadDir reads the directory named by dirname and returns a
-	// list of directory entries.
-	ReadDir(dirname string) ([]os.FileInfo, error)
-
-	// Lstat returns a FileInfo describing the named file. If the file is a
-	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
-	// makes no attempt to follow the link.
-	Lstat(name string) (os.FileInfo, error)
-
-	// Join joins any number of path elements into a single path, adding a
-	// separator if necessary.
The result is Cleaned; in particular, all - // empty strings are ignored. - // - // The separator is FileSystem specific. - Join(elem ...string) string -} - -// fs represents a FileSystem provided by the os package. -type fs struct{} - -func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } - -func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } - -func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/go.mod b/vendor/github.com/kr/fs/go.mod deleted file mode 100644 index 7c206e04c..000000000 --- a/vendor/github.com/kr/fs/go.mod +++ /dev/null @@ -1 +0,0 @@ -module "github.com/kr/fs" diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go deleted file mode 100644 index 6ffa1e0b2..000000000 --- a/vendor/github.com/kr/fs/walk.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package fs provides filesystem-related functions. -package fs - -import ( - "os" -) - -// Walker provides a convenient interface for iterating over the -// descendants of a filesystem path. -// Successive calls to the Step method will step through each -// file or directory in the tree, including the root. The files -// are walked in lexical order, which makes the output deterministic -// but means that for very large directories Walker can be inefficient. -// Walker does not follow symbolic links. -type Walker struct { - fs FileSystem - cur item - stack []item - descend bool -} - -type item struct { - path string - info os.FileInfo - err error -} - -// Walk returns a new Walker rooted at root. -func Walk(root string) *Walker { - return WalkFS(root, new(fs)) -} - -// WalkFS returns a new Walker rooted at root on the FileSystem fs. -func WalkFS(root string, fs FileSystem) *Walker { - info, err := fs.Lstat(root) - return &Walker{ - fs: fs, - stack: []item{{root, info, err}}, - } -} - -// Step advances the Walker to the next file or directory, -// which will then be available through the Path, Stat, -// and Err methods. -// It returns false when the walk stops at the end of the tree. -func (w *Walker) Step() bool { - if w.descend && w.cur.err == nil && w.cur.info.IsDir() { - list, err := w.fs.ReadDir(w.cur.path) - if err != nil { - w.cur.err = err - w.stack = append(w.stack, w.cur) - } else { - for i := len(list) - 1; i >= 0; i-- { - path := w.fs.Join(w.cur.path, list[i].Name()) - w.stack = append(w.stack, item{path, list[i], nil}) - } - } - } - - if len(w.stack) == 0 { - return false - } - i := len(w.stack) - 1 - w.cur = w.stack[i] - w.stack = w.stack[:i] - w.descend = true - return true -} - -// Path returns the path to the most recent file or directory -// visited by a call to Step. It contains the argument to Walk -// as a prefix; that is, if Walk is called with "dir", which is -// a directory containing the file "a", Path will return "dir/a". -func (w *Walker) Path() string { - return w.cur.path -} - -// Stat returns info for the most recent file or directory -// visited by a call to Step. -func (w *Walker) Stat() os.FileInfo { - return w.cur.info -} - -// Err returns the error, if any, for the most recent attempt -// by Step to visit a file or directory. If a directory has -// an error, w will not descend into that directory. -func (w *Walker) Err() error { - return w.cur.err -} - -// SkipDir causes the currently visited directory to be skipped. -// If w is not on a directory, SkipDir has no effect. 
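In contrast to filepath.Walk, the caller drives the traversal and checks Err after each Step; SkipDir prunes the subtree under the current directory. A typical loop, under the assumption of an arbitrary root path:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/kr/fs"
)

func main() {
	walker := fs.Walk("/tmp")
	for walker.Step() {
		if err := walker.Err(); err != nil {
			fmt.Println("error:", err)
			continue
		}
		// Prune dot-directories rather than descending into them.
		if fi := walker.Stat(); fi.IsDir() && strings.HasPrefix(fi.Name(), ".") {
			walker.SkipDir()
			continue
		}
		fmt.Println(walker.Path())
	}
}
```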
-func (w *Walker) SkipDir() { - w.descend = false -} diff --git a/vendor/github.com/kurin/blazer/AUTHORS b/vendor/github.com/kurin/blazer/AUTHORS deleted file mode 100644 index 9fd5e55e2..000000000 --- a/vendor/github.com/kurin/blazer/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the list of Blazer authors for copyright purposes. -# -# This does not necessarily list everyone who has contributed code, since in -# some cases, their employer may be the copyright holder. To see the full list -# of contributors, see the revision history in source control. -# -# Tag yourself. -Google LLC diff --git a/vendor/github.com/kurin/blazer/LICENSE b/vendor/github.com/kurin/blazer/LICENSE deleted file mode 100644 index 95f7e9632..000000000 --- a/vendor/github.com/kurin/blazer/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2016, the Blazer authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/kurin/blazer/b2/b2.go b/vendor/github.com/kurin/blazer/b2/b2.go deleted file mode 100644 index 728fcdc88..000000000 --- a/vendor/github.com/kurin/blazer/b2/b2.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b2 provides a high-level interface to Backblaze's B2 cloud storage -// service. -// -// It is specifically designed to abstract away the Backblaze API details by -// providing familiar Go interfaces, specifically an io.Writer for object -// storage, and an io.Reader for object download. Handling of transient -// errors, including network and authentication timeouts, is transparent. -// -// Methods that perform network requests accept a context.Context argument. -// Callers should use the context's cancellation abilities to end requests -// early, or to provide timeout or deadline guarantees. -// -// This package is in development and may make API changes. -package b2 - -import ( - "context" - "fmt" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - "sync" - "time" -) - -// Client is a Backblaze B2 client. -type Client struct { - backend beRootInterface - - slock sync.Mutex - sWriters map[string]*Writer - sReaders map[string]*Reader - sMethods []methodCounter - opts clientOptions -} - -// NewClient creates and returns a new Client with valid B2 service account -// tokens. 
-func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
-	c := &Client{
-		backend: &beRoot{
-			b2i: &b2Root{},
-		},
-		sMethods: []methodCounter{
-			newMethodCounter(time.Minute, time.Second),
-			newMethodCounter(time.Minute*5, time.Second),
-			newMethodCounter(time.Hour, time.Minute),
-			newMethodCounter(0, 0), // forever
-		},
-	}
-	opts = append(opts, client(c))
-	for _, f := range opts {
-		f(&c.opts)
-	}
-	if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil {
-		return nil, err
-	}
-	return c, nil
-}
-
-type clientOptions struct {
-	client          *Client
-	transport       http.RoundTripper
-	failSomeUploads bool
-	expireTokens    bool
-	capExceeded     bool
-	apiBase         string
-	userAgents      []string
-	writerOpts      []WriterOption
-}
-
-// A ClientOption allows callers to adjust various per-client settings.
-type ClientOption func(*clientOptions)
-
-// UserAgent sets the User-Agent HTTP header. The default header is
-// "blazer/<version>"; the value set here will be prepended to that. This can
-// be set multiple times.
-//
-// A user agent is generally of the form "<product>/<version> (<comments>)".
-func UserAgent(agent string) ClientOption {
-	return func(o *clientOptions) {
-		o.userAgents = append(o.userAgents, agent)
-	}
-}
-
-// APIBase returns a ClientOption specifying the URL root of API requests.
-func APIBase(url string) ClientOption {
-	return func(o *clientOptions) {
-		o.apiBase = url
-	}
-}
-
-// Transport sets the underlying HTTP transport mechanism. If unset,
-// http.DefaultTransport is used.
-func Transport(rt http.RoundTripper) ClientOption {
-	return func(c *clientOptions) {
-		c.transport = rt
-	}
-}
-
-// FailSomeUploads requests intermittent upload failures from the B2 service.
-// This is mostly useful for testing.
-func FailSomeUploads() ClientOption {
-	return func(c *clientOptions) {
-		c.failSomeUploads = true
-	}
-}
-
-// ExpireSomeAuthTokens requests intermittent authentication failures from the
-// B2 service.
-func ExpireSomeAuthTokens() ClientOption {
-	return func(c *clientOptions) {
-		c.expireTokens = true
-	}
-}
-
-// ForceCapExceeded requests a cap limit from the B2 service. This causes all
-// uploads to be treated as if they would exceed the configured B2 capacity.
-func ForceCapExceeded() ClientOption {
-	return func(c *clientOptions) {
-		c.capExceeded = true
-	}
-}
-
-func client(cl *Client) ClientOption {
-	return func(c *clientOptions) {
-		c.client = cl
-	}
-}
-
-type clientTransport struct {
-	client *Client
-	rt     http.RoundTripper
-}
-
-func (ct *clientTransport) RoundTrip(r *http.Request) (*http.Response, error) {
-	m := r.Header.Get("X-Blazer-Method")
-	t := ct.rt
-	if t == nil {
-		t = http.DefaultTransport
-	}
-	b := time.Now()
-	resp, err := t.RoundTrip(r)
-	e := time.Now()
-	if err != nil {
-		return resp, err
-	}
-	if m != "" && ct.client != nil {
-		ct.client.slock.Lock()
-		m := method{
-			name:     m,
-			duration: e.Sub(b),
-			status:   resp.StatusCode,
-		}
-		for _, counter := range ct.client.sMethods {
-			counter.record(m)
-		}
-		ct.client.slock.Unlock()
-	}
-	return resp, nil
-}
-
-// Bucket is a reference to a B2 bucket.
-type Bucket struct {
-	b beBucketInterface
-	r beRootInterface
-
-	c       *Client
-	urlPool *urlPool
-}
-
-type BucketType string
-
-const (
-	UnknownType BucketType = ""
-	Private                = "allPrivate"
-	Public                 = "allPublic"
-	Snapshot               = "snapshot"
-)
-
-// BucketAttrs holds a bucket's metadata attributes.
-type BucketAttrs struct {
-	// Type lists or sets the new bucket type.
If Type is UnknownType during a
-	// bucket.Update, the type is not changed.
-	Type BucketType
-
-	// Info records user data, limited to ten keys. If nil during a
-	// bucket.Update, the existing bucket info is not modified. A bucket's
-	// metadata can be removed by updating with an empty map.
-	Info map[string]string
-
-	// Reports or sets bucket lifecycle rules. If nil during a bucket.Update,
-	// the rules are not modified. A bucket's rules can be removed by updating
-	// with an empty slice.
-	LifecycleRules []LifecycleRule
-}
-
-// A LifecycleRule describes an object's life cycle, namely how many days after
-// uploading an object should be hidden, and after how many days hidden an
-// object should be deleted. Multiple rules may not apply to the same file or
-// set of files. Be careful when using this feature; it can (is designed to)
-// delete your data.
-type LifecycleRule struct {
-	// Prefix specifies all the files in the bucket to which this rule applies.
-	Prefix string
-
-	// DaysNewUntilHidden specifies the number of days after which a file
-	// will automatically be hidden. 0 means "do not automatically hide new
-	// files".
-	DaysNewUntilHidden int
-
-	// DaysHiddenUntilDeleted specifies the number of days after which a hidden
-	// file is deleted. 0 means "do not automatically delete hidden files".
-	DaysHiddenUntilDeleted int
-}
-
-type b2err struct {
-	err              error
-	notFoundErr      bool
-	isUpdateConflict bool
-}
-
-func (e b2err) Error() string {
-	return e.err.Error()
-}
-
-// IsNotExist reports whether a given error indicates that an object or bucket
-// does not exist.
-func IsNotExist(err error) bool {
-	berr, ok := err.(b2err)
-	if !ok {
-		return false
-	}
-	return berr.notFoundErr
-}
-
-const uploadURLPoolSize = 100
-
-type urlPool struct {
-	ch chan beURLInterface
-}
-
-func newURLPool() *urlPool {
-	return &urlPool{ch: make(chan beURLInterface, uploadURLPoolSize)}
-}
-
-func (p *urlPool) get() beURLInterface {
-	select {
-	case ue := <-p.ch:
-		// if the channel has an upload URL available, use that
		return ue
-	default:
-		// otherwise return nil, a new upload URL needs to be generated
-		return nil
-	}
-}
-
-func (p *urlPool) put(u beURLInterface) {
-	select {
-	case p.ch <- u:
-		// put the URL back if possible
-	default:
-		// if the channel is full, throw it away
-	}
-}
-
-// Bucket returns a bucket if it exists.
-func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) {
-	buckets, err := c.backend.listBuckets(ctx)
-	if err != nil {
-		return nil, err
-	}
-	for _, bucket := range buckets {
-		if bucket.name() == name {
-			return &Bucket{
-				b:       bucket,
-				r:       c.backend,
-				c:       c,
-				urlPool: newURLPool(),
-			}, nil
-		}
-	}
-	return nil, b2err{
-		err:         fmt.Errorf("%s: bucket not found", name),
-		notFoundErr: true,
-	}
-}
-
-// NewBucket returns a bucket. The bucket is created with the given attributes
-// if it does not already exist. If attrs is nil, it is created as a private
-// bucket with no info metadata and no lifecycle rules.
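Tying BucketAttrs and LifecycleRule together: creating (or fetching) a private bucket that hides objects under "logs/" a week after upload and deletes them a day after hiding might look like the sketch below. The credentials and names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/kurin/blazer/b2"
)

func main() {
	ctx := context.Background()
	// Placeholder credentials; substitute a real account ID and key.
	client, err := b2.NewClient(ctx, "accountID", "applicationKey")
	if err != nil {
		log.Fatal(err)
	}
	attrs := &b2.BucketAttrs{
		Type: b2.Private,
		LifecycleRules: []b2.LifecycleRule{{
			Prefix:                 "logs/",
			DaysNewUntilHidden:     7, // hide a week after upload
			DaysHiddenUntilDeleted: 1, // delete a day after hiding
		}},
	}
	bucket, err := client.NewBucket(ctx, "my-log-bucket", attrs)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket ready:", bucket.Name())
}
```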
-func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) { - buckets, err := c.backend.listBuckets(ctx) - if err != nil { - return nil, err - } - for _, bucket := range buckets { - if bucket.name() == name { - return &Bucket{ - b: bucket, - r: c.backend, - c: c, - urlPool: newURLPool(), - }, nil - } - } - if attrs == nil { - attrs = &BucketAttrs{Type: Private} - } - b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules) - if err != nil { - return nil, err - } - return &Bucket{ - b: b, - r: c.backend, - c: c, - urlPool: newURLPool(), - }, err -} - -// ListBuckets returns all the available buckets. -func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) { - bs, err := c.backend.listBuckets(ctx) - if err != nil { - return nil, err - } - var buckets []*Bucket - for _, b := range bs { - buckets = append(buckets, &Bucket{ - b: b, - r: c.backend, - c: c, - urlPool: newURLPool(), - }) - } - return buckets, nil -} - -// IsUpdateConflict reports whether a given error is the result of a bucket -// update conflict. -func IsUpdateConflict(err error) bool { - e, ok := err.(b2err) - if !ok { - return false - } - return e.isUpdateConflict -} - -// Update modifies the given bucket with new attributes. It is possible that -// this method could fail with an update conflict, in which case you should -// retrieve the latest bucket attributes with Attrs and try again. -func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error { - return b.b.updateBucket(ctx, attrs) -} - -// Attrs retrieves and returns the current bucket's attributes. -func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) { - bucket, err := b.c.Bucket(ctx, b.Name()) - if err != nil { - return nil, err - } - b.b = bucket.b - return b.b.attrs(), nil -} - -var bNotExist = regexp.MustCompile("Bucket.*does not exist") - -// Delete removes a bucket. The bucket must be empty. -func (b *Bucket) Delete(ctx context.Context) error { - err := b.b.deleteBucket(ctx) - if err == nil { - return err - } - // So, the B2 documentation disagrees with the implementation here, and the - // error code is not really helpful. If the bucket doesn't exist, the error is - // 400, not 404, and the string is "Bucket does not exist". However, the - // documentation says it will be "Bucket id does not exist". In case - // they update the implementation to match the documentation, we're just going - // to regexp over the error message and hope it's okay. - if bNotExist.MatchString(err.Error()) { - return b2err{ - err: err, - notFoundErr: true, - } - } - return err -} - -// BaseURL returns the base URL to use for all files uploaded to this bucket. -func (b *Bucket) BaseURL() string { - return b.b.baseURL() -} - -// Name returns the bucket's name. -func (b *Bucket) Name() string { - return b.b.name() -} - -// Object represents a B2 object. -type Object struct { - attrs *Attrs - name string - f beFileInterface - b *Bucket -} - -// Attrs holds an object's metadata. -type Attrs struct { - Name string // Not used on upload. - Size int64 // Not used on upload. - ContentType string // Used on upload, default is "application/octet-stream". - Status ObjectState // Not used on upload. - UploadTimestamp time.Time // Not used on upload. - SHA1 string // Can be "none" for large files. If set on upload, will be used for large files. - LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload. 
-	Info            map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
-}
-
-// Name returns an object's name.
-func (o *Object) Name() string {
-	return o.name
-}
-
-// Attrs returns an object's attributes.
-func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
-	if err := o.ensure(ctx); err != nil {
-		return nil, err
-	}
-	fi, err := o.f.getFileInfo(ctx)
-	if err != nil {
-		return nil, err
-	}
-	name, sha, size, ct, info, st, stamp := fi.stats()
-	var state ObjectState
-	switch st {
-	case "upload":
-		state = Uploaded
-	case "start":
-		state = Started
-	case "hide":
-		state = Hider
-	case "folder":
-		state = Folder
-	}
-	var mtime time.Time
-	if v, ok := info["src_last_modified_millis"]; ok {
-		ms, err := strconv.ParseInt(v, 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		mtime = time.Unix(ms/1e3, (ms%1e3)*1e6)
-		delete(info, "src_last_modified_millis")
-	}
-	if v, ok := info["large_file_sha1"]; ok {
-		sha = v
-	}
-	return &Attrs{
-		Name:            name,
-		Size:            size,
-		ContentType:     ct,
-		UploadTimestamp: stamp,
-		SHA1:            sha,
-		Info:            info,
-		Status:          state,
-		LastModified:    mtime,
-	}, nil
-}
-
-// ObjectState represents the various states an object can be in.
-type ObjectState int
-
-const (
-	Unknown ObjectState = iota
-	// Started represents a large upload that has been started but not finished
-	// or canceled.
-	Started
-	// Uploaded represents an object that has finished uploading and is complete.
-	Uploaded
-	// Hider represents an object that exists only to hide another object. It
-	// cannot in itself be downloaded and, in particular, is not a hidden object.
-	Hider
-
-	// Folder is a special state given to non-objects that are returned during a
-	// List call with a ListDelimiter option.
-	Folder
-)
-
-// Object returns a reference to the named object in the bucket. Hidden
-// objects cannot be referenced in this manner; they can only be found by
-// finding the appropriate reference in ListObjects.
-func (b *Bucket) Object(name string) *Object {
-	return &Object{
-		name: name,
-		b:    b,
-	}
-}
-
-// URL returns the full URL to the given object.
-func (o *Object) URL() string {
-	return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name)
-}
-
-// NewWriter returns a new writer for the given object. Objects that are
-// overwritten are not deleted, but are "hidden".
-//
-// Callers must close the writer when finished and check the error status.
-func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer {
-	ctx, cancel := context.WithCancel(ctx)
-	w := &Writer{
-		o:      o,
-		name:   o.name,
-		ctx:    ctx,
-		cancel: cancel,
-	}
-	for _, f := range o.b.c.opts.writerOpts {
-		f(w)
-	}
-	for _, f := range opts {
-		f(w)
-	}
-	return w
-}
-
-// NewRangeReader returns a reader for the given object, reading up to length
-// bytes. If length is negative, the rest of the object is read.
-func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader {
-	ctx, cancel := context.WithCancel(ctx)
-	return &Reader{
-		ctx:    ctx,
-		cancel: cancel,
-		o:      o,
-		name:   o.name,
-		chunks: make(map[int]*rchunk),
-		length: length,
-		offset: offset,
-	}
-}
-
-// NewReader returns a reader for the given object.
-func (o *Object) NewReader(ctx context.Context) *Reader {
-	return o.NewRangeReader(ctx, 0, -1)
-}
-
-func (o *Object) ensure(ctx context.Context) error {
-	if o.f == nil {
-		f, err := o.b.getObject(ctx, o.name)
-		if err != nil {
-			return err
-		}
-		o.f = f.f
-	}
-	return nil
-}
-
-// Delete removes the given object.
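NewWriter and NewReader together form the whole data path: upload through an io.Writer, download through an io.Reader, and check Close on the writer, since that is what finalizes the upload. A sketch, with bucket assumed to come from Client.Bucket or Client.NewBucket and an arbitrary object name:

```go
package b2sketch

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/kurin/blazer/b2"
)

// putAndGet uploads a small object and reads it back.
func putAndGet(ctx context.Context, bucket *b2.Bucket) error {
	obj := bucket.Object("hello.txt")

	w := obj.NewWriter(ctx)
	if _, err := io.WriteString(w, "hello, b2"); err != nil {
		w.Close()
		return err
	}
	// Close finalizes the upload and must be checked.
	if err := w.Close(); err != nil {
		return err
	}

	r := obj.NewReader(ctx)
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Printf("read back %q\n", data)
	return nil
}
```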
-func (o *Object) Delete(ctx context.Context) error { - if err := o.ensure(ctx); err != nil { - return err - } - return o.f.deleteFileVersion(ctx) -} - -// Hide hides the object from name-based listing. -func (o *Object) Hide(ctx context.Context) error { - if err := o.ensure(ctx); err != nil { - return err - } - _, err := o.b.b.hideFile(ctx, o.name) - return err -} - -// Reveal unhides (if hidden) the named object. If there are multiple objects -// of a given name, it will reveal the most recent. -func (b *Bucket) Reveal(ctx context.Context, name string) error { - iter := b.List(ctx, ListPrefix(name), ListHidden()) - for iter.Next() { - obj := iter.Object() - if obj.Name() == name { - if obj.f.status() == "hide" { - return obj.Delete(ctx) - } - return nil - } - if obj.Name() > name { - break - } - } - return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true} -} - -// I don't want to import all of ioutil for this. -type discard struct{} - -func (discard) Write(p []byte) (int, error) { - return len(p), nil -} - -func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) { - fr, err := b.b.downloadFileByName(ctx, name, 0, 1) - if err != nil { - return nil, err - } - io.Copy(discard{}, fr) - fr.Close() - return &Object{ - name: name, - f: b.b.file(fr.id(), name), - b: b, - }, nil -} - -// AuthToken returns an authorization token that can be used to access objects -// in a private bucket. Only objects that begin with prefix can be accessed. -// The token expires after the given duration. -func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) { - return b.b.getDownloadAuthorization(ctx, prefix, valid, "") -} - -// AuthURL returns a URL for the given object with embedded token and, -// possibly, b2ContentDisposition arguments. Leave b2cd blank for no content -// disposition. -func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) { - token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd) - if err != nil { - return nil, err - } - urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token)) - if b2cd != "" { - urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd)) - } - u, err := url.Parse(urlString) - if err != nil { - return nil, err - } - return u, nil -} diff --git a/vendor/github.com/kurin/blazer/b2/backend.go b/vendor/github.com/kurin/blazer/b2/backend.go deleted file mode 100644 index 5ca3bd458..000000000 --- a/vendor/github.com/kurin/blazer/b2/backend.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b2 - -import ( - "context" - "io" - "math/rand" - "time" -) - -// This file wraps the baseline interfaces with backoff and retry semantics. 
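Every wrapper below follows the same two-layer shape: an inner closure retries once after reauthorization, and an outer loop retries transient failures with exponential backoff (see withBackoff and withReauth at the end of this file). Reduced to its core, the backoff layer looks roughly like this simplified sketch:

```go
package retrysketch

import (
	"context"
	"time"
)

// withBackoff is a simplified sketch of the retry loop used throughout
// this file: retry f while the error is transient, doubling the delay,
// and give up when ctx is canceled. The real version also honors a
// server-suggested backoff duration and adds jitter.
func withBackoff(ctx context.Context, transient func(error) bool, f func() error) error {
	delay := 500 * time.Millisecond
	for {
		err := f()
		if err == nil || !transient(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		delay *= 2
	}
}
```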
- -type beRootInterface interface { - backoff(error) time.Duration - reauth(error) bool - transient(error) bool - reupload(error) bool - authorizeAccount(context.Context, string, string, clientOptions) error - reauthorizeAccount(context.Context) error - createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) - listBuckets(context.Context) ([]beBucketInterface, error) - createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error) - listKeys(context.Context, int, string) ([]beKeyInterface, string, error) -} - -type beRoot struct { - account, key string - b2i b2RootInterface - options clientOptions -} - -type beBucketInterface interface { - name() string - btype() BucketType - attrs() *BucketAttrs - id() string - updateBucket(context.Context, *BucketAttrs) error - deleteBucket(context.Context) error - getUploadURL(context.Context) (beURLInterface, error) - startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error) - listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error) - listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error) - listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error) - downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error) - hideFile(context.Context, string) (beFileInterface, error) - getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) - baseURL() string - file(string, string) beFileInterface -} - -type beBucket struct { - b2bucket b2BucketInterface - ri beRootInterface -} - -type beURLInterface interface { - uploadFile(context.Context, readResetter, int, string, string, string, map[string]string) (beFileInterface, error) -} - -type beURL struct { - b2url b2URLInterface - ri beRootInterface -} - -type beFileInterface interface { - name() string - size() int64 - timestamp() time.Time - status() string - deleteFileVersion(context.Context) error - getFileInfo(context.Context) (beFileInfoInterface, error) - listParts(context.Context, int, int) ([]beFilePartInterface, int, error) - compileParts(int64, map[int]string) beLargeFileInterface -} - -type beFile struct { - b2file b2FileInterface - url beURLInterface - ri beRootInterface -} - -type beLargeFileInterface interface { - finishLargeFile(context.Context) (beFileInterface, error) - getUploadPartURL(context.Context) (beFileChunkInterface, error) - cancel(context.Context) error -} - -type beLargeFile struct { - b2largeFile b2LargeFileInterface - ri beRootInterface -} - -type beFileChunkInterface interface { - reload(context.Context) error - uploadPart(context.Context, readResetter, string, int, int) (int, error) -} - -type beFileChunk struct { - b2fileChunk b2FileChunkInterface - ri beRootInterface -} - -type beFileReaderInterface interface { - io.ReadCloser - stats() (int, string, string, map[string]string) - id() string -} - -type beFileReader struct { - b2fileReader b2FileReaderInterface - ri beRootInterface -} - -type beFileInfoInterface interface { - stats() (string, string, int64, string, map[string]string, string, time.Time) -} - -type beFilePartInterface interface { - number() int - sha1() string - size() int64 -} - -type beFilePart struct { - b2filePart b2FilePartInterface - ri beRootInterface -} - -type beFileInfo struct { - name string - sha string - size 
int64 - ct string - info map[string]string - status string - stamp time.Time -} - -type beKeyInterface interface { - del(context.Context) error - caps() []string - name() string - expires() time.Time - secret() string - id() string -} - -type beKey struct { - b2i beRootInterface - k b2KeyInterface -} - -func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) } -func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) } -func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) } -func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) } - -func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { - f := func() error { - if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil { - return err - } - r.account = account - r.key = key - r.options = c - return nil - } - return withBackoff(ctx, r, f) -} - -func (r *beRoot) reauthorizeAccount(ctx context.Context) error { - return r.authorizeAccount(ctx, r.account, r.key, r.options) -} - -func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) { - var bi beBucketInterface - f := func() error { - g := func() error { - bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules) - if err != nil { - return err - } - bi = &beBucket{ - b2bucket: bucket, - ri: r, - } - return nil - } - return withReauth(ctx, r, g) - } - if err := withBackoff(ctx, r, f); err != nil { - return nil, err - } - return bi, nil -} - -func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) { - var buckets []beBucketInterface - f := func() error { - g := func() error { - bs, err := r.b2i.listBuckets(ctx) - if err != nil { - return err - } - for _, b := range bs { - buckets = append(buckets, &beBucket{ - b2bucket: b, - ri: r, - }) - } - return nil - } - return withReauth(ctx, r, g) - } - if err := withBackoff(ctx, r, f); err != nil { - return nil, err - } - return buckets, nil -} - -func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) { - var k *beKey - f := func() error { - g := func() error { - got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix) - if err != nil { - return err - } - k = &beKey{ - b2i: r, - k: got, - } - return nil - } - return withReauth(ctx, r, g) - } - if err := withBackoff(ctx, r, f); err != nil { - return nil, err - } - return k, nil -} - -func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) { - var keys []beKeyInterface - var cur string - f := func() error { - g := func() error { - got, n, err := r.b2i.listKeys(ctx, max, next) - if err != nil { - return err - } - cur = n - for _, g := range got { - keys = append(keys, &beKey{ - b2i: r, - k: g, - }) - } - return nil - } - return withReauth(ctx, r, g) - } - if err := withBackoff(ctx, r, f); err != nil { - return nil, "", err - } - return keys, cur, nil -} - -func (b *beBucket) name() string { return b.b2bucket.name() } -func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) } -func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() } -func (b *beBucket) id() string { return b.b2bucket.id() } - -func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { - f := func() error { - g := func() error { - return b.b2bucket.updateBucket(ctx, attrs) - } - return 
withReauth(ctx, b.ri, g) - } - return withBackoff(ctx, b.ri, f) -} - -func (b *beBucket) deleteBucket(ctx context.Context) error { - f := func() error { - g := func() error { - return b.b2bucket.deleteBucket(ctx) - } - return withReauth(ctx, b.ri, g) - } - return withBackoff(ctx, b.ri, f) -} - -func (b *beBucket) getUploadURL(ctx context.Context) (beURLInterface, error) { - var url beURLInterface - f := func() error { - g := func() error { - u, err := b.b2bucket.getUploadURL(ctx) - if err != nil { - return err - } - url = &beURL{ - b2url: u, - ri: b.ri, - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return url, nil -} - -func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) { - var file beLargeFileInterface - f := func() error { - g := func() error { - f, err := b.b2bucket.startLargeFile(ctx, name, ct, info) - if err != nil { - return err - } - file = &beLargeFile{ - b2largeFile: f, - ri: b.ri, - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return file, nil -} - -func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) { - var cont string - var files []beFileInterface - f := func() error { - g := func() error { - fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter) - if err != nil { - return err - } - cont = c - for _, f := range fs { - files = append(files, &beFile{ - b2file: f, - ri: b.ri, - }) - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, "", err - } - return files, cont, nil -} - -func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) { - var name, id string - var files []beFileInterface - f := func() error { - g := func() error { - fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter) - if err != nil { - return err - } - name = n - id = d - for _, f := range fs { - files = append(files, &beFile{ - b2file: f, - ri: b.ri, - }) - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, "", "", err - } - return files, name, id, nil -} - -func (b *beBucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]beFileInterface, string, error) { - var cont string - var files []beFileInterface - f := func() error { - g := func() error { - fs, c, err := b.b2bucket.listUnfinishedLargeFiles(ctx, count, continuation) - if err != nil { - return err - } - cont = c - for _, f := range fs { - files = append(files, &beFile{ - b2file: f, - ri: b.ri, - }) - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, "", err - } - return files, cont, nil -} - -func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (beFileReaderInterface, error) { - var reader beFileReaderInterface - f := func() error { - g := func() error { - fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size) - if err != nil { - return err - } - reader = &beFileReader{ - b2fileReader: fr, - ri: b.ri, - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if 
err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return reader, nil -} - -func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) { - var file beFileInterface - f := func() error { - g := func() error { - f, err := b.b2bucket.hideFile(ctx, name) - if err != nil { - return err - } - file = &beFile{ - b2file: f, - ri: b.ri, - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return file, nil -} - -func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) { - var tok string - f := func() error { - g := func() error { - t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s) - if err != nil { - return err - } - tok = t - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return "", err - } - return tok, nil -} - -func (b *beBucket) baseURL() string { - return b.b2bucket.baseURL() -} - -func (b *beBucket) file(id, name string) beFileInterface { - return &beFile{ - b2file: b.b2bucket.file(id, name), - ri: b.ri, - } -} - -func (b *beURL) uploadFile(ctx context.Context, r readResetter, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) { - var file beFileInterface - f := func() error { - if err := r.Reset(); err != nil { - return err - } - f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info) - if err != nil { - return err - } - file = &beFile{ - b2file: f, - url: b, - ri: b.ri, - } - return nil - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return file, nil -} - -func (b *beFile) deleteFileVersion(ctx context.Context) error { - f := func() error { - g := func() error { - return b.b2file.deleteFileVersion(ctx) - } - return withReauth(ctx, b.ri, g) - } - return withBackoff(ctx, b.ri, f) -} - -func (b *beFile) size() int64 { - return b.b2file.size() -} - -func (b *beFile) name() string { - return b.b2file.name() -} - -func (b *beFile) timestamp() time.Time { - return b.b2file.timestamp() -} - -func (b *beFile) status() string { - return b.b2file.status() -} - -func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) { - var fileInfo beFileInfoInterface - f := func() error { - g := func() error { - fi, err := b.b2file.getFileInfo(ctx) - if err != nil { - return err - } - name, sha, size, ct, info, status, stamp := fi.stats() - fileInfo = &beFileInfo{ - name: name, - sha: sha, - size: size, - ct: ct, - info: info, - status: status, - stamp: stamp, - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, err - } - return fileInfo, nil -} - -func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) { - var fpi []beFilePartInterface - var rnxt int - f := func() error { - g := func() error { - ps, n, err := b.b2file.listParts(ctx, next, count) - if err != nil { - return err - } - rnxt = n - for _, p := range ps { - fpi = append(fpi, &beFilePart{ - b2filePart: p, - ri: b.ri, - }) - } - return nil - } - return withReauth(ctx, b.ri, g) - } - if err := withBackoff(ctx, b.ri, f); err != nil { - return nil, 0, err - } - return fpi, rnxt, nil -} - -func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface { - return &beLargeFile{ - b2largeFile: b.b2file.compileParts(size, seen), - ri: b.ri, - } -} - -func (b *beLargeFile) 
getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) {
-	var chunk beFileChunkInterface
-	f := func() error {
-		g := func() error {
-			fc, err := b.b2largeFile.getUploadPartURL(ctx)
-			if err != nil {
-				return err
-			}
-			chunk = &beFileChunk{
-				b2fileChunk: fc,
-				ri:          b.ri,
-			}
-			return nil
-		}
-		return withReauth(ctx, b.ri, g)
-	}
-	if err := withBackoff(ctx, b.ri, f); err != nil {
-		return nil, err
-	}
-	return chunk, nil
-}
-
-func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) {
-	var file beFileInterface
-	f := func() error {
-		g := func() error {
-			f, err := b.b2largeFile.finishLargeFile(ctx)
-			if err != nil {
-				return err
-			}
-			file = &beFile{
-				b2file: f,
-				ri:     b.ri,
-			}
-			return nil
-		}
-		return withReauth(ctx, b.ri, g)
-	}
-	if err := withBackoff(ctx, b.ri, f); err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-func (b *beLargeFile) cancel(ctx context.Context) error {
-	f := func() error {
-		g := func() error {
-			return b.b2largeFile.cancel(ctx)
-		}
-		return withReauth(ctx, b.ri, g)
-	}
-	return withBackoff(ctx, b.ri, f)
-}
-
-func (b *beFileChunk) reload(ctx context.Context) error {
-	f := func() error {
-		g := func() error {
-			return b.b2fileChunk.reload(ctx)
-		}
-		return withReauth(ctx, b.ri, g)
-	}
-	return withBackoff(ctx, b.ri, f)
-}
-
-func (b *beFileChunk) uploadPart(ctx context.Context, r readResetter, sha1 string, size, index int) (int, error) {
-	// no re-auth; pass it back up to the caller so they can get a new upload URL and token
-	// TODO: we should handle that here probably
-	var i int
-	f := func() error {
-		if err := r.Reset(); err != nil {
-			return err
-		}
-		j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index)
-		if err != nil {
-			return err
-		}
-		i = j
-		return nil
-	}
-	if err := withBackoff(ctx, b.ri, f); err != nil {
-		return 0, err
-	}
-	return i, nil
-}
-
-func (b *beFileReader) Read(p []byte) (int, error) {
-	return b.b2fileReader.Read(p)
-}
-
-func (b *beFileReader) Close() error {
-	return b.b2fileReader.Close()
-}
-
-func (b *beFileReader) stats() (int, string, string, map[string]string) {
-	return b.b2fileReader.stats()
-}
-
-func (b *beFileReader) id() string { return b.b2fileReader.id() }
-
-func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
-	return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp
-}
-
-func (b *beFilePart) number() int  { return b.b2filePart.number() }
-func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
-func (b *beFilePart) size() int64  { return b.b2filePart.size() }
-
-func (b *beKey) del(ctx context.Context) error { return b.k.del(ctx) }
-func (b *beKey) caps() []string                { return b.k.caps() }
-func (b *beKey) name() string                  { return b.k.name() }
-func (b *beKey) expires() time.Time            { return b.k.expires() }
-func (b *beKey) secret() string                { return b.k.secret() }
-func (b *beKey) id() string                    { return b.k.id() }
-
-func jitter(d time.Duration) time.Duration {
-	f := float64(d)
-	f /= 50
-	f += f * (rand.Float64() - 0.5)
-	return time.Duration(f)
-}
-
-func getBackoff(d time.Duration) time.Duration {
-	if d > 30*time.Second {
-		return 30*time.Second + jitter(d)
-	}
-	return d*2 + jitter(d*2)
-}
-
-var after = time.After
-
-func withBackoff(ctx context.Context, ri beRootInterface, f func() error) error {
-	backoff := 500 * time.Millisecond
-	for {
-		err := f()
-		if !ri.transient(err) {
-			return err
-		}
-		bo := ri.backoff(err)
-		if bo > 0 {
-			backoff = bo
-		} else {
-			backoff = getBackoff(backoff)
-		}
-
select { - case <-ctx.Done(): - return ctx.Err() - case <-after(backoff): - } - } -} - -func withReauth(ctx context.Context, ri beRootInterface, f func() error) error { - err := f() - if ri.reauth(err) { - if err := ri.reauthorizeAccount(ctx); err != nil { - return err - } - err = f() - } - return err -} diff --git a/vendor/github.com/kurin/blazer/b2/baseline.go b/vendor/github.com/kurin/blazer/b2/baseline.go deleted file mode 100644 index bfa4c80c9..000000000 --- a/vendor/github.com/kurin/blazer/b2/baseline.go +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b2 - -import ( - "context" - "io" - "net/http" - "time" - - "github.com/kurin/blazer/base" -) - -// This file wraps the base package in a thin layer, for testing. It should be -// the only file in b2 that imports base. - -type b2RootInterface interface { - authorizeAccount(context.Context, string, string, clientOptions) error - transient(error) bool - backoff(error) time.Duration - reauth(error) bool - reupload(error) bool - createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error) - listBuckets(context.Context) ([]b2BucketInterface, error) - createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error) - listKeys(context.Context, int, string) ([]b2KeyInterface, string, error) -} - -type b2BucketInterface interface { - name() string - btype() string - attrs() *BucketAttrs - id() string - updateBucket(context.Context, *BucketAttrs) error - deleteBucket(context.Context) error - getUploadURL(context.Context) (b2URLInterface, error) - startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error) - listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error) - listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error) - listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error) - downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error) - hideFile(context.Context, string) (b2FileInterface, error) - getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) - baseURL() string - file(string, string) b2FileInterface -} - -type b2URLInterface interface { - reload(context.Context) error - uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error) -} - -type b2FileInterface interface { - name() string - size() int64 - timestamp() time.Time - status() string - deleteFileVersion(context.Context) error - getFileInfo(context.Context) (b2FileInfoInterface, error) - listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) - compileParts(int64, map[int]string) b2LargeFileInterface -} - -type b2LargeFileInterface interface { - finishLargeFile(context.Context) (b2FileInterface, 
error) - getUploadPartURL(context.Context) (b2FileChunkInterface, error) - cancel(context.Context) error -} - -type b2FileChunkInterface interface { - reload(context.Context) error - uploadPart(context.Context, io.Reader, string, int, int) (int, error) -} - -type b2FileReaderInterface interface { - io.ReadCloser - stats() (int, string, string, map[string]string) - id() string -} - -type b2FileInfoInterface interface { - stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck -} - -type b2FilePartInterface interface { - number() int - sha1() string - size() int64 -} - -type b2KeyInterface interface { - del(context.Context) error - caps() []string - name() string - expires() time.Time - secret() string - id() string -} - -type b2Root struct { - b *base.B2 -} - -type b2Bucket struct { - b *base.Bucket -} - -type b2URL struct { - b *base.URL -} - -type b2File struct { - b *base.File -} - -type b2LargeFile struct { - b *base.LargeFile -} - -type b2FileChunk struct { - b *base.FileChunk -} - -type b2FileReader struct { - b *base.FileReader -} - -type b2FileInfo struct { - b *base.FileInfo -} - -type b2FilePart struct { - b *base.FilePart -} - -type b2Key struct { - b *base.Key -} - -func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { - var aopts []base.AuthOption - ct := &clientTransport{client: c.client} - if c.transport != nil { - ct.rt = c.transport - } - aopts = append(aopts, base.Transport(ct)) - if c.failSomeUploads { - aopts = append(aopts, base.FailSomeUploads()) - } - if c.expireTokens { - aopts = append(aopts, base.ExpireSomeAuthTokens()) - } - if c.capExceeded { - aopts = append(aopts, base.ForceCapExceeded()) - } - if c.apiBase != "" { - aopts = append(aopts, base.SetAPIBase(c.apiBase)) - } - for _, agent := range c.userAgents { - aopts = append(aopts, base.UserAgent(agent)) - } - nb, err := base.AuthorizeAccount(ctx, account, key, aopts...) 
- if err != nil { - return err - } - if b.b == nil { - b.b = nb - return nil - } - b.b.Update(nb) - return nil -} - -func (*b2Root) backoff(err error) time.Duration { - if base.Action(err) != base.Retry { - return 0 - } - return base.Backoff(err) -} - -func (*b2Root) reauth(err error) bool { - return base.Action(err) == base.ReAuthenticate -} - -func (*b2Root) reupload(err error) bool { - return base.Action(err) == base.AttemptNewUpload -} - -func (*b2Root) transient(err error) bool { - return base.Action(err) == base.Retry -} - -func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) { - var baseRules []base.LifecycleRule - for _, rule := range rules { - baseRules = append(baseRules, base.LifecycleRule{ - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - Prefix: rule.Prefix, - }) - } - bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules) - if err != nil { - return nil, err - } - return &b2Bucket{bucket}, nil -} - -func (b *b2Root) listBuckets(ctx context.Context) ([]b2BucketInterface, error) { - buckets, err := b.b.ListBuckets(ctx) - if err != nil { - return nil, err - } - var rtn []b2BucketInterface - for _, bucket := range buckets { - rtn = append(rtn, &b2Bucket{bucket}) - } - return rtn, err -} - -func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { - if attrs == nil { - return nil - } - if attrs.Type != UnknownType { - b.b.Type = string(attrs.Type) - } - if attrs.Info != nil { - b.b.Info = attrs.Info - } - if attrs.LifecycleRules != nil { - rules := []base.LifecycleRule{} - for _, rule := range attrs.LifecycleRules { - rules = append(rules, base.LifecycleRule{ - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - Prefix: rule.Prefix, - }) - } - b.b.LifecycleRules = rules - } - newBucket, err := b.b.Update(ctx) - if err == nil { - b.b = newBucket - } - code, _ := base.Code(err) - if code == 409 { - return b2err{ - err: err, - isUpdateConflict: true, - } - } - return err -} - -func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) { - k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix) - if err != nil { - return nil, err - } - return &b2Key{k}, nil -} - -func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) { - keys, next, err := b.b.ListKeys(ctx, max, next) - if err != nil { - return nil, "", err - } - var k []b2KeyInterface - for _, key := range keys { - k = append(k, &b2Key{key}) - } - return k, next, nil -} - -func (b *b2Bucket) deleteBucket(ctx context.Context) error { - return b.b.DeleteBucket(ctx) -} - -func (b *b2Bucket) name() string { - return b.b.Name -} - -func (b *b2Bucket) btype() string { - return b.b.Type -} - -func (b *b2Bucket) attrs() *BucketAttrs { - var rules []LifecycleRule - for _, rule := range b.b.LifecycleRules { - rules = append(rules, LifecycleRule{ - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - Prefix: rule.Prefix, - }) - } - return &BucketAttrs{ - LifecycleRules: rules, - Info: b.b.Info, - Type: BucketType(b.b.Type), - } -} - -func (b *b2Bucket) id() string { return b.b.ID } - -func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) { - url, err := b.b.GetUploadURL(ctx) - if err != nil { - 
return nil, err - } - return &b2URL{url}, nil -} - -func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) { - lf, err := b.b.StartLargeFile(ctx, name, ct, info) - if err != nil { - return nil, err - } - return &b2LargeFile{lf}, nil -} - -func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) { - fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter) - if err != nil { - return nil, "", err - } - var files []b2FileInterface - for _, f := range fs { - files = append(files, &b2File{f}) - } - return files, c, nil -} - -func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) { - fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter) - if err != nil { - return nil, "", "", err - } - var files []b2FileInterface - for _, f := range fs { - files = append(files, &b2File{f}) - } - return files, name, id, nil -} - -func (b *b2Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]b2FileInterface, string, error) { - fs, cont, err := b.b.ListUnfinishedLargeFiles(ctx, count, continuation) - if err != nil { - return nil, "", err - } - var files []b2FileInterface - for _, f := range fs { - files = append(files, &b2File{f}) - } - return files, cont, nil -} - -func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) { - fr, err := b.b.DownloadFileByName(ctx, name, offset, size) - if err != nil { - code, _ := base.Code(err) - switch code { - case http.StatusRequestedRangeNotSatisfiable: - return nil, errNoMoreContent - case http.StatusNotFound: - return nil, b2err{err: err, notFoundErr: true} - } - return nil, err - } - return &b2FileReader{fr}, nil -} - -func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) { - f, err := b.b.HideFile(ctx, name) - if err != nil { - return nil, err - } - return &b2File{f}, nil -} - -func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) { - return b.b.GetDownloadAuthorization(ctx, p, v, s) -} - -func (b *b2Bucket) baseURL() string { - return b.b.BaseURL() -} - -func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} } - -func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) { - file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info) - if err != nil { - return nil, err - } - return &b2File{file}, nil -} - -func (b *b2URL) reload(ctx context.Context) error { - return b.b.Reload(ctx) -} - -func (b *b2File) deleteFileVersion(ctx context.Context) error { - return b.b.DeleteFileVersion(ctx) -} - -func (b *b2File) name() string { - return b.b.Name -} - -func (b *b2File) size() int64 { - return b.b.Size -} - -func (b *b2File) timestamp() time.Time { - return b.b.Timestamp -} - -func (b *b2File) status() string { - return b.b.Status -} - -func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) { - if b.b.Info != nil { - return &b2FileInfo{b.b.Info}, nil - } - fi, err := b.b.GetFileInfo(ctx) - if err != nil { - return nil, err - } - return &b2FileInfo{fi}, nil -} - -func (b *b2File) listParts(ctx context.Context, 
next, count int) ([]b2FilePartInterface, int, error) { - parts, n, err := b.b.ListParts(ctx, next, count) - if err != nil { - return nil, 0, err - } - var rtn []b2FilePartInterface - for _, part := range parts { - rtn = append(rtn, &b2FilePart{part}) - } - return rtn, n, nil -} - -func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface { - return &b2LargeFile{b.b.CompileParts(size, seen)} -} - -func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) { - f, err := b.b.FinishLargeFile(ctx) - if err != nil { - return nil, err - } - return &b2File{f}, nil -} - -func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) { - c, err := b.b.GetUploadPartURL(ctx) - if err != nil { - return nil, err - } - return &b2FileChunk{c}, nil -} - -func (b *b2LargeFile) cancel(ctx context.Context) error { - return b.b.CancelLargeFile(ctx) -} - -func (b *b2FileChunk) reload(ctx context.Context) error { - return b.b.Reload(ctx) -} - -func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { - return b.b.UploadPart(ctx, r, sha1, size, index) -} - -func (b *b2FileReader) Read(p []byte) (int, error) { - return b.b.Read(p) -} - -func (b *b2FileReader) Close() error { - return b.b.Close() -} - -func (b *b2FileReader) stats() (int, string, string, map[string]string) { - return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info -} - -func (b *b2FileReader) id() string { return b.b.ID } - -func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) { - return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp -} - -func (b *b2FilePart) number() int { return b.b.Number } -func (b *b2FilePart) sha1() string { return b.b.SHA1 } -func (b *b2FilePart) size() int64 { return b.b.Size } - -func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) } -func (b *b2Key) caps() []string { return b.b.Capabilities } -func (b *b2Key) name() string { return b.b.Name } -func (b *b2Key) expires() time.Time { return b.b.Expires } -func (b *b2Key) secret() string { return b.b.Secret } -func (b *b2Key) id() string { return b.b.ID } diff --git a/vendor/github.com/kurin/blazer/b2/buffer.go b/vendor/github.com/kurin/blazer/b2/buffer.go deleted file mode 100644 index 4c62afb77..000000000 --- a/vendor/github.com/kurin/blazer/b2/buffer.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2017, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package b2 - -import ( - "bytes" - "crypto/sha1" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -type readResetter interface { - Read([]byte) (int, error) - Reset() error -} - -type resetter struct { - rs io.ReadSeeker -} - -func (r resetter) Read(p []byte) (int, error) { return r.rs.Read(p) } -func (r resetter) Reset() error { _, err := r.rs.Seek(0, 0); return err } - -func newResetter(p []byte) readResetter { return resetter{rs: bytes.NewReader(p)} } - -type writeBuffer interface { - io.Writer - Len() int - Reader() (readResetter, error) - Hash() string // sha1 or whatever it is - Close() error -} - -// nonBuffer doesn't buffer anything, but passes values directly from the -// source readseeker. Many nonBuffers can point at different parts of the same -// underlying source, and be accessed by multiple goroutines simultaneously. -func newNonBuffer(rs io.ReaderAt, offset, size int64) writeBuffer { - return &nonBuffer{ - r: io.NewSectionReader(rs, offset, size), - size: int(size), - hsh: sha1.New(), - } -} - -type nonBuffer struct { - r *io.SectionReader - size int - hsh hash.Hash - - isEOF bool - buf *strings.Reader -} - -func (nb *nonBuffer) Len() int { return nb.size + 40 } -func (nb *nonBuffer) Hash() string { return "hex_digits_at_end" } -func (nb *nonBuffer) Close() error { return nil } -func (nb *nonBuffer) Reader() (readResetter, error) { return nb, nil } -func (nb *nonBuffer) Write([]byte) (int, error) { return 0, errors.New("writes not supported") } - -func (nb *nonBuffer) Read(p []byte) (int, error) { - if nb.isEOF { - return nb.buf.Read(p) - } - n, err := io.TeeReader(nb.r, nb.hsh).Read(p) - if err == io.EOF { - err = nil - nb.isEOF = true - nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.hsh.Sum(nil))) - } - return n, err -} - -func (nb *nonBuffer) Reset() error { - nb.hsh.Reset() - nb.isEOF = false - _, err := nb.r.Seek(0, 0) - return err -} - -type memoryBuffer struct { - buf *bytes.Buffer - hsh hash.Hash - w io.Writer - mux sync.Mutex -} - -var bufpool *sync.Pool - -func init() { - bufpool = &sync.Pool{} - bufpool.New = func() interface{} { return &bytes.Buffer{} } -} - -func newMemoryBuffer() *memoryBuffer { - mb := &memoryBuffer{ - hsh: sha1.New(), - } - mb.buf = bufpool.Get().(*bytes.Buffer) - mb.w = io.MultiWriter(mb.hsh, mb.buf) - return mb -} - -func (mb *memoryBuffer) Write(p []byte) (int, error) { return mb.w.Write(p) } -func (mb *memoryBuffer) Len() int { return mb.buf.Len() } -func (mb *memoryBuffer) Reader() (readResetter, error) { return newResetter(mb.buf.Bytes()), nil } -func (mb *memoryBuffer) Hash() string { return fmt.Sprintf("%x", mb.hsh.Sum(nil)) } - -func (mb *memoryBuffer) Close() error { - mb.mux.Lock() - defer mb.mux.Unlock() - if mb.buf == nil { - return nil - } - mb.buf.Truncate(0) - bufpool.Put(mb.buf) - mb.buf = nil - return nil -} - -type fileBuffer struct { - f *os.File - hsh hash.Hash - w io.Writer - s int -} - -func newFileBuffer(loc string) (*fileBuffer, error) { - f, err := ioutil.TempFile(loc, "blazer") - if err != nil { - return nil, err - } - fb := &fileBuffer{ - f: f, - hsh: sha1.New(), - } - fb.w = io.MultiWriter(fb.f, fb.hsh) - return fb, nil -} - -func (fb *fileBuffer) Write(p []byte) (int, error) { - n, err := fb.w.Write(p) - fb.s += n - return n, err -} - -func (fb *fileBuffer) Len() int { return fb.s } -func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) } - -func (fb *fileBuffer) Reader() (readResetter, error) { - if _, err := fb.f.Seek(0, 0); err != nil { - return 
nil, err - } - return &fr{f: fb.f}, nil -} - -func (fb *fileBuffer) Close() error { - fb.f.Close() - return os.Remove(fb.f.Name()) -} - -// wraps *os.File so that the http package doesn't see it as an io.Closer -type fr struct { - f *os.File -} - -func (r *fr) Read(p []byte) (int, error) { return r.f.Read(p) } -func (r *fr) Reset() error { _, err := r.f.Seek(0, 0); return err } diff --git a/vendor/github.com/kurin/blazer/b2/iterator.go b/vendor/github.com/kurin/blazer/b2/iterator.go deleted file mode 100644 index c85ab0119..000000000 --- a/vendor/github.com/kurin/blazer/b2/iterator.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2018, the Blazer authors - -// - -// Licensed under the Apache License, Version 2.0 (the "License"); - -// you may not use this file except in compliance with the License. - -// You may obtain a copy of the License at - -// - -// http://www.apache.org/licenses/LICENSE-2.0 - -// - -// Unless required by applicable law or agreed to in writing, software - -// distributed under the License is distributed on an "AS IS" BASIS, - -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - -// See the License for the specific language governing permissions and - -// limitations under the License. - -package b2 - -import ( - "context" - "io" - "sync" -) - -// List returns an iterator for selecting objects in a bucket. The default -// behavior, with no options, is to list all currently un-hidden objects. -func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator { - o := &ObjectIterator{ - bucket: b, - ctx: ctx, - } - for _, opt := range opts { - opt(&o.opts) - } - return o -} - -// ObjectIterator abstracts away the tricky bits of iterating over a bucket's -// contents. -// -// It is intended to be called in a loop: - // for iter.Next() { - // obj := iter.Object() - // // act on obj - // } - // if err := iter.Err(); err != nil { - // // handle err - // } -type ObjectIterator struct { - bucket *Bucket - ctx context.Context - final bool - err error - idx int - c *cursor - opts objectIteratorOptions - objs []*Object - init sync.Once - l lister - count int -} - -type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error) - -func (o *ObjectIterator) page(ctx context.Context) error { - if o.opts.locker != nil { - o.opts.locker.Lock() - defer o.opts.locker.Unlock() - } - objs, c, err := o.l(ctx, o.count, o.c) - if err != nil && err != io.EOF { - if bNotExist.MatchString(err.Error()) { - return b2err{ - err: err, - notFoundErr: true, - } - } - return err - } - o.c = c - o.objs = objs - o.idx = 0 - if err == io.EOF { - o.final = true - } - return nil -} - -// Next advances the iterator to the next object. It should be called before -// any calls to Object(). If Next returns true, then the next call to Object() -// will be valid. Once Next returns false, it is important to check the return -// value of Err().
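Concretely, the loop described above looks like this from a caller's perspective. A minimal sketch, assuming an authorized client, a *b2.Bucket in hand, and the usual context/fmt/log imports; the "logs/" prefix is illustrative:

	iter := bucket.List(ctx, b2.ListPrefix("logs/"), b2.ListPageSize(500))
	for iter.Next() {
		fmt.Println(iter.Object().Name())
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err) // Err is nil on a clean end-of-listing
	}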
-func (o *ObjectIterator) Next() bool { - o.init.Do(func() { - o.count = o.opts.pageSize - if o.count < 0 || o.count > 1000 { - o.count = 1000 - } - switch { - case o.opts.unfinished: - o.l = o.bucket.listUnfinishedLargeFiles - if o.count > 100 { - o.count = 100 - } - case o.opts.hidden: - o.l = o.bucket.listObjects - default: - o.l = o.bucket.listCurrentObjects - } - o.c = &cursor{ - prefix: o.opts.prefix, - delimiter: o.opts.delimiter, - } - }) - if o.err != nil { - return false - } - if o.ctx.Err() != nil { - o.err = o.ctx.Err() - return false - } - if o.idx >= len(o.objs) { - if o.final { - o.err = io.EOF - return false - } - if err := o.page(o.ctx); err != nil { - o.err = err - return false - } - return o.Next() - } - o.idx++ - return true -} - -// Object returns the current object. -func (o *ObjectIterator) Object() *Object { - return o.objs[o.idx-1] -} - -// Err returns the current error or nil. If Next() returns false and Err() is -// nil, then all objects have been seen. -func (o *ObjectIterator) Err() error { - if o.err == io.EOF { - return nil - } - return o.err -} - -type objectIteratorOptions struct { - hidden bool - unfinished bool - prefix string - delimiter string - pageSize int - locker sync.Locker -} - -// A ListOption alters the default behavior of List. -type ListOption func(*objectIteratorOptions) - -// ListHidden will include hidden objects in the output. -func ListHidden() ListOption { - return func(o *objectIteratorOptions) { - o.hidden = true - } -} - -// ListUnfinished will list unfinished large file operations instead of -// existing objects. -func ListUnfinished() ListOption { - return func(o *objectIteratorOptions) { - o.unfinished = true - } -} - -// ListPrefix will restrict the output to objects whose names begin with -// prefix. -func ListPrefix(pfx string) ListOption { - return func(o *objectIteratorOptions) { - o.prefix = pfx - } -} - -// ListDelimiter denotes the path separator. If set, object listings will be -// truncated at this character. -// -// For example, if the bucket contains objects foo/bar, foo/baz, and foo, -// then a delimiter of "/" will cause the listing to return "foo" and "foo/". -// Otherwise, the listing would have returned all object names. -// -// Note that objects returned that end in the delimiter may not be actual -// objects, e.g. you cannot read from (or write to, or delete) an object -// "foo/", both because no actual object exists and because B2 disallows object -// names that end with "/". If you want to ensure that all objects returned -// are actual objects, leave this unset. -func ListDelimiter(delimiter string) ListOption { - return func(o *objectIteratorOptions) { - o.delimiter = delimiter - } -} - -// ListPageSize configures the iterator to request the given number of objects -// per network round-trip. The default (and maximum) is 1000 objects, except -// for unfinished large files, which is 100. -func ListPageSize(count int) ListOption { - return func(o *objectIteratorOptions) { - o.pageSize = count - } -} - -// ListLocker passes the iterator a lock which will be held during network -// round-trips. -func ListLocker(l sync.Locker) ListOption { - return func(o *objectIteratorOptions) { - o.locker = l - } -} - -type cursor struct { - // Prefix limits the listed objects to those that begin with this string. - prefix string - - // Delimiter denotes the path separator. If set, object listings will be - // truncated at this character.
- // - // For example, if the bucket contains objects foo/bar, foo/baz, and foo, - // then a delimiter of "/" will cause the listing to return "foo" and "foo/". - // Otherwise, the listing would have returned all object names. - // - // Note that objects returned that end in the delimiter may not be actual - // objects, e.g. you cannot read from (or write to, or delete) an object "foo/", - // both because no actual object exists and because B2 disallows object names - // that end with "/". If you want to ensure that all objects returned by - // ListObjects and ListCurrentObjects are actual objects, leave this unset. - delimiter string - - name string - id string -} - -func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { - if c == nil { - c = &cursor{} - } - fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter) - if err != nil { - return nil, nil, err - } - var next *cursor - if name != "" && id != "" { - next = &cursor{ - prefix: c.prefix, - delimiter: c.delimiter, - name: name, - id: id, - } - } - var objects []*Object - for _, f := range fs { - objects = append(objects, &Object{ - name: f.name(), - f: f, - b: b, - }) - } - var rtnErr error - if len(objects) == 0 || next == nil { - rtnErr = io.EOF - } - return objects, next, rtnErr -} - -func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { - if c == nil { - c = &cursor{} - } - fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter) - if err != nil { - return nil, nil, err - } - var next *cursor - if name != "" { - next = &cursor{ - prefix: c.prefix, - delimiter: c.delimiter, - name: name, - } - } - var objects []*Object - for _, f := range fs { - objects = append(objects, &Object{ - name: f.name(), - f: f, - b: b, - }) - } - var rtnErr error - if len(objects) == 0 || next == nil { - rtnErr = io.EOF - } - return objects, next, rtnErr -} - -func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { - if c == nil { - c = &cursor{} - } - fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name) - if err != nil { - return nil, nil, err - } - var next *cursor - if name != "" { - next = &cursor{ - name: name, - } - } - var objects []*Object - for _, f := range fs { - objects = append(objects, &Object{ - name: f.name(), - f: f, - b: b, - }) - } - var rtnErr error - if len(objects) == 0 || next == nil { - rtnErr = io.EOF - } - return objects, next, rtnErr -} diff --git a/vendor/github.com/kurin/blazer/b2/key.go b/vendor/github.com/kurin/blazer/b2/key.go deleted file mode 100644 index 68649a665..000000000 --- a/vendor/github.com/kurin/blazer/b2/key.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b2 - -import ( - "context" - "errors" - "io" - "time" -) - -// Key is a B2 application key. 
A Key grants limited access on a global or -// per-bucket basis. -type Key struct { - c *Client - k beKeyInterface -} - -// Capabilities returns the list of capabilities granted by this application -// key. -func (k *Key) Capabilities() []string { return k.k.caps() } - -// Name returns the user-supplied name of this application key. Key names are -// useless. -func (k *Key) Name() string { return k.k.name() } - -// Expires returns the expiration date of this application key. -func (k *Key) Expires() time.Time { return k.k.expires() } - -// Delete removes the key from B2. -func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) } - -// Secret returns the value that should be passed into NewClient(). It is only -// available on newly created keys; it is not available from ListKeys -// operations. -func (k *Key) Secret() string { return k.k.secret() } - -// ID returns the application key ID. This, plus the secret, is necessary to -// authenticate to B2. -func (k *Key) ID() string { return k.k.id() } - -type keyOptions struct { - caps []string - prefix string - lifetime time.Duration -} - -// KeyOption specifies desired properties for application keys. -type KeyOption func(*keyOptions) - -// Lifetime requests a key with the given lifetime. -func Lifetime(d time.Duration) KeyOption { - return func(k *keyOptions) { - k.lifetime = d - } -} - -// Deadline requests a key that expires at the given time. -func Deadline(t time.Time) KeyOption { - d := t.Sub(time.Now()) - return Lifetime(d) -} - -// Capabilities requests a key with the given capabilities. -func Capabilities(caps ...string) KeyOption { - return func(k *keyOptions) { - k.caps = append(k.caps, caps...) - } -} - -// Prefix limits the requested application key to be valid only for objects -// that begin with prefix. This can only be used when requesting an -// application key within a specific bucket. -func Prefix(prefix string) KeyOption { - return func(k *keyOptions) { - k.prefix = prefix - } -} - -// CreateKey creates a global application key that is valid for all buckets in -// this project. The key's secret will only be accessible on the object -// returned from this call. -func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) { - var ko keyOptions - for _, o := range opts { - o(&ko) - } - if ko.prefix != "" { - return nil, errors.New("Prefix is not a valid option for global application keys") - } - ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "") - if err != nil { - return nil, err - } - return &Key{ - c: c, - k: ki, - }, nil -} - -// ListKeys lists all the keys associated with this project. It takes the -// maximum number of keys it should return in a call, as well as a cursor -// (which should be empty for the initial call). It will return up to count -// keys, as well as the cursor for the next invocation. -// -// ListKeys returns io.EOF when there are no more keys, although it may do so -// concurrently with the final set of keys. -func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) { - ks, next, err := c.backend.listKeys(ctx, count, cursor) - if err != nil { - return nil, "", err - } - if len(ks) == 0 { - return nil, "", io.EOF - } - var keys []*Key - for _, k := range ks { - keys = append(keys, &Key{ - c: c, - k: k, - }) - } - var rerr error - if next == "" { - rerr = io.EOF - } - return keys, next, rerr -} - -// CreateKey creates a scoped application key that is valid only for this bucket.
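Composed, the key options above read as follows from the caller's side. This is a sketch only; the key name, capability strings, and prefix are illustrative values, not taken from this code:

	key, err := bucket.CreateKey(ctx, "ci-uploader",
		b2.Capabilities("writeFiles", "listFiles"),
		b2.Prefix("builds/"),
		b2.Lifetime(24*time.Hour),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.ID(), key.Secret()) // Secret is only available here, at creation time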
-func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) { - var ko keyOptions - for _, o := range opts { - o(&ko) - } - ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix) - if err != nil { - return nil, err - } - return &Key{ - c: b.c, - k: ki, - }, nil -} diff --git a/vendor/github.com/kurin/blazer/b2/monitor.go b/vendor/github.com/kurin/blazer/b2/monitor.go deleted file mode 100644 index ccf5d1322..000000000 --- a/vendor/github.com/kurin/blazer/b2/monitor.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2017, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b2 - -import ( - "fmt" - "html/template" - "math" - "net/http" - "sort" - "time" - - "github.com/kurin/blazer/internal/b2assets" - "github.com/kurin/blazer/x/window" -) - -// StatusInfo reports information about a client. -type StatusInfo struct { - // Writers contains the status of all current uploads with progress. - Writers map[string]*WriterStatus - - // Readers contains the status of all current downloads with progress. - Readers map[string]*ReaderStatus - - // RPCs contains information about recently made RPC calls over the last - // minute, five minutes, hour, and for all time. - RPCs map[time.Duration]MethodList -} - -// MethodList is an accumulation of RPC calls that have been made over a given -// period of time. -type MethodList []method - -// CountByMethod returns the total RPC calls made per method. -func (ml MethodList) CountByMethod() map[string]int { - r := make(map[string]int) - for i := range ml { - r[ml[i].name]++ - } - return r -} - -type method struct { - name string - duration time.Duration - status int -} - -type methodCounter struct { - d time.Duration - w *window.Window -} - -func (mc methodCounter) record(m method) { - mc.w.Insert([]method{m}) -} - -func (mc methodCounter) retrieve() MethodList { - ms := mc.w.Reduce() - return MethodList(ms.([]method)) -} - -func newMethodCounter(d, res time.Duration) methodCounter { - r := func(i, j interface{}) interface{} { - a, ok := i.([]method) - if !ok { - a = nil - } - b, ok := j.([]method) - if !ok { - b = nil - } - for _, m := range b { - a = append(a, m) - } - return a - } - return methodCounter{ - d: d, - w: window.New(d, res, r), - } -} - -// WriterStatus reports the status for each writer. -type WriterStatus struct { - // Progress is a slice of completion ratios. The index of a ratio is its - // chunk id less one. - Progress []float64 -} - -// ReaderStatus reports the status for each reader. -type ReaderStatus struct { - // Progress is a slice of completion ratios. The index of a ratio is its - // chunk id less one. - Progress []float64 -} - -// Status returns information about the current state of the client. 
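A caller-side sketch of consuming these structures, assuming a *b2.Client named client with transfers in flight (imports fmt):

	si := client.Status()
	for name, ws := range si.Writers {
		fmt.Println(name, ws.Progress) // per-chunk completion ratios
	}
	for window, calls := range si.RPCs {
		for method, n := range calls.CountByMethod() {
			fmt.Printf("%v: %s=%d\n", window, method, n)
		}
	}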
-func (c *Client) Status() *StatusInfo { - c.slock.Lock() - defer c.slock.Unlock() - - si := &StatusInfo{ - Writers: make(map[string]*WriterStatus), - Readers: make(map[string]*ReaderStatus), - RPCs: make(map[time.Duration]MethodList), - } - - for name, w := range c.sWriters { - si.Writers[name] = w.status() - } - - for name, r := range c.sReaders { - si.Readers[name] = r.status() - } - - for _, c := range c.sMethods { - si.RPCs[c.d] = c.retrieve() - } - - return si -} - -func (si *StatusInfo) table() map[string]map[string]int { - r := make(map[string]map[string]int) - for d, c := range si.RPCs { - for _, m := range c { - if _, ok := r[m.name]; !ok { - r[m.name] = make(map[string]int) - } - dur := "all time" - if d > 0 { - dur = d.String() - } - r[m.name][dur]++ - } - } - return r -} - -func (c *Client) addWriter(w *Writer) { - c.slock.Lock() - defer c.slock.Unlock() - - if c.sWriters == nil { - c.sWriters = make(map[string]*Writer) - } - - c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w -} - -func (c *Client) removeWriter(w *Writer) { - c.slock.Lock() - defer c.slock.Unlock() - - if c.sWriters == nil { - return - } - - delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)) -} - -func (c *Client) addReader(r *Reader) { - c.slock.Lock() - defer c.slock.Unlock() - - if c.sReaders == nil { - c.sReaders = make(map[string]*Reader) - } - - c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r -} - -func (c *Client) removeReader(r *Reader) { - c.slock.Lock() - defer c.slock.Unlock() - - if c.sReaders == nil { - return - } - - delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)) -} - -var ( - funcMap = template.FuncMap{ - "inc": func(i int) int { return i + 1 }, - "lookUp": func(m map[string]int, s string) int { return m[s] }, - "pRange": func(i int) string { - f := float64(i) - min := int(math.Pow(2, f)) - 1 - max := min + int(math.Pow(2, f)) - return fmt.Sprintf("%v - %v", time.Duration(min)*time.Millisecond, time.Duration(max)*time.Millisecond) - }, - "methods": func(si *StatusInfo) []string { - methods := make(map[string]bool) - for _, ms := range si.RPCs { - for _, m := range ms { - methods[m.name] = true - } - } - var names []string - for name := range methods { - names = append(names, name) - } - sort.Strings(names) - return names - }, - "durations": func(si *StatusInfo) []string { - var ds []time.Duration - for d := range si.RPCs { - ds = append(ds, d) - } - sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] }) - var r []string - for _, d := range ds { - dur := "all time" - if d > 0 { - dur = d.String() - } - r = append(r, dur) - } - return r - }, - "table": func(si *StatusInfo) map[string]map[string]int { return si.table() }, - } - statusTemplate = template.Must(template.New("status").Funcs(funcMap).Parse(string(b2assets.MustAsset("data/status.html")))) -) - -// ServeHTTP serves diagnostic information about the current state of the -// client; essentially everything available from Client.Status() -// -// ServeHTTP satisfies the http.Handler interface. This means that a Client -// can be passed directly to a path via http.Handle (or on a custom ServeMux or -// a custom http.Server). 
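Because Client is itself an http.Handler, wiring up the diagnostic page is a one-liner. A sketch; the path and address are arbitrary:

	http.Handle("/internal/b2", client)
	log.Println(http.ListenAndServe("localhost:6060", nil))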
-func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - info := c.Status() - statusTemplate.Execute(rw, info) -} diff --git a/vendor/github.com/kurin/blazer/b2/reader.go b/vendor/github.com/kurin/blazer/b2/reader.go deleted file mode 100644 index e8e85ac0c..000000000 --- a/vendor/github.com/kurin/blazer/b2/reader.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b2 - -import ( - "bytes" - "context" - "crypto/sha1" - "errors" - "fmt" - "hash" - "io" - "sync" - "time" - - "github.com/kurin/blazer/internal/blog" -) - -var errNoMoreContent = errors.New("416: out of content") - -// Reader reads files from B2. -type Reader struct { - // ConcurrentDownloads is the number of simultaneous downloads to pull from - // B2. Values greater than one will cause B2 to make multiple HTTP requests - // for a given file, increasing available bandwidth at the cost of buffering - // the downloads in memory. - ConcurrentDownloads int - - // ChunkSize is the size to fetch per ConcurrentDownload. The default is - // 10MB. - ChunkSize int - - ctx context.Context - cancel context.CancelFunc // cancels ctx - o *Object - name string - offset int64 // the start of the file - length int64 // the length to read, or -1 - csize int // chunk size - read int // amount read - chwid int // chunks written - chrid int // chunks read - chbuf chan *rchunk - init sync.Once - chunks map[int]*rchunk - vrfy hash.Hash - readOffEnd bool - sha1 string - - rmux sync.Mutex // guards rcond - rcond *sync.Cond - - emux sync.RWMutex // guards err, believe it or not - err error - - smux sync.Mutex - smap map[int]*meteredReader -} - -type rchunk struct { - bytes.Buffer - final bool -} - -// Close frees resources associated with the download. 
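A download sketch using the tunables documented above; both must be set before the first Read, since initialization happens exactly once. It assumes an *Object obtained elsewhere (its NewReader constructor is not part of this hunk) and a destination io.Writer named dst:

	r := obj.NewReader(ctx)
	r.ConcurrentDownloads = 4 // four ranged requests in flight
	r.ChunkSize = 1 << 24     // 16 MiB buffered per request
	defer r.Close()
	if _, err := io.Copy(dst, r); err != nil {
		log.Fatal(err)
	}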
-func (r *Reader) Close() error { - r.cancel() - r.o.b.c.removeReader(r) - return nil -} - -func (r *Reader) setErr(err error) { - r.emux.Lock() - defer r.emux.Unlock() - if r.err == nil { - r.err = err - r.cancel() - } -} - -func (r *Reader) setErrNoCancel(err error) { - r.emux.Lock() - defer r.emux.Unlock() - if r.err == nil { - r.err = err - } -} - -func (r *Reader) getErr() error { - r.emux.RLock() - defer r.emux.RUnlock() - return r.err -} - -func (r *Reader) thread() { - go func() { - for { - var buf *rchunk - select { - case b, ok := <-r.chbuf: - if !ok { - return - } - buf = b - case <-r.ctx.Done(): - return - } - r.rmux.Lock() - chunkID := r.chwid - r.chwid++ - r.rmux.Unlock() - offset := int64(chunkID*r.csize) + r.offset - size := int64(r.csize) - if r.length > 0 { - if size > r.length { - buf.final = true - size = r.length - } - r.length -= size - } - var b backoff - redo: - fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size) - if err == errNoMoreContent { - // this read generated a 416 so we are entirely past the end of the object - r.readOffEnd = true - buf.final = true - r.rmux.Lock() - r.chunks[chunkID] = buf - r.rmux.Unlock() - r.rcond.Broadcast() - return - } - if err != nil { - r.setErr(err) - r.rcond.Broadcast() - return - } - rsize, _, sha1, _ := fr.stats() - if len(sha1) == 40 && r.sha1 != sha1 { - r.sha1 = sha1 - } - mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)} - r.smux.Lock() - r.smap[chunkID] = mr - r.smux.Unlock() - i, err := copyContext(r.ctx, buf, mr) - fr.Close() - r.smux.Lock() - r.smap[chunkID] = nil - r.smux.Unlock() - if i < int64(rsize) || err == io.ErrUnexpectedEOF { - // Probably the network connection was closed early. Retry. - blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b) - if err := b.wait(r.ctx); err != nil { - r.setErr(err) - r.rcond.Broadcast() - return - } - buf.Reset() - goto redo - } - if err != nil { - r.setErr(err) - r.rcond.Broadcast() - return - } - r.rmux.Lock() - r.chunks[chunkID] = buf - r.rmux.Unlock() - r.rcond.Broadcast() - } - }() -} - -func (r *Reader) curChunk() (*rchunk, error) { - ch := make(chan *rchunk) - go func() { - r.rmux.Lock() - defer r.rmux.Unlock() - for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil { - r.rcond.Wait() - } - select { - case ch <- r.chunks[r.chrid]: - case <-r.ctx.Done(): - return - } - }() - select { - case buf := <-ch: - return buf, r.getErr() - case <-r.ctx.Done(): - if r.getErr() != nil { - return nil, r.getErr() - } - return nil, r.ctx.Err() - } -} - -func (r *Reader) initFunc() { - r.smux.Lock() - r.smap = make(map[int]*meteredReader) - r.smux.Unlock() - r.o.b.c.addReader(r) - r.rcond = sync.NewCond(&r.rmux) - cr := r.ConcurrentDownloads - if cr < 1 { - cr = 1 - } - if r.ChunkSize < 1 { - r.ChunkSize = 1e7 - } - r.csize = r.ChunkSize - r.chbuf = make(chan *rchunk, cr) - for i := 0; i < cr; i++ { - r.thread() - r.chbuf <- &rchunk{} - } - r.vrfy = sha1.New() -} - -func (r *Reader) Read(p []byte) (int, error) { - if err := r.getErr(); err != nil { - return 0, err - } - r.init.Do(r.initFunc) - chunk, err := r.curChunk() - if err != nil { - r.setErrNoCancel(err) - return 0, err - } - n, err := chunk.Read(p) - r.vrfy.Write(p[:n]) // Hash.Write never returns an error. 
- r.read += n - if err == io.EOF { - if chunk.final { - close(r.chbuf) - r.setErrNoCancel(err) - return n, err - } - r.chrid++ - chunk.Reset() - r.chbuf <- chunk - err = nil - } - r.setErrNoCancel(err) - return n, err -} - -func (r *Reader) status() *ReaderStatus { - r.smux.Lock() - defer r.smux.Unlock() - - rs := &ReaderStatus{ - Progress: make([]float64, len(r.smap)), - } - - for i := 1; i <= len(r.smap); i++ { - rs.Progress[i-1] = r.smap[i].done() - } - - return rs -} - -// Verify checks the SHA1 hash on download and compares it to the SHA1 hash -// submitted on upload. If the two differ, this returns an error. If the -// correct hash could not be calculated (if, for example, the entire object was -// not read, or if the object was uploaded as a "large file" and thus the SHA1 -// hash was not sent), this returns (nil, false). -func (r *Reader) Verify() (error, bool) { - got := fmt.Sprintf("%x", r.vrfy.Sum(nil)) - if r.sha1 == got { - return nil, true - } - // TODO: if the exact length of the file is requested AND the checksum is - // bad, this will return (nil, false) instead of (an error, true). This is - // because there's no good way that I can tell to determine that we've hit - // the end of the file without reading off the end. Consider reading N+1 - // bytes at the very end to close this hole. - if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 { - return nil, false - } - return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true -} - -// strip a writer of any non-Write methods -type onlyWriter struct{ w io.Writer } - -func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) } - -func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) { - var n int64 - var err error - done := make(chan struct{}) - go func() { - if _, ok := w.(*Writer); ok { - w = onlyWriter{w} - } - n, err = io.Copy(w, r) - close(done) - }() - select { - case <-done: - return n, err - case <-ctx.Done(): - return 0, ctx.Err() - } -} - -type noopResetter struct { - io.Reader -} - -func (noopResetter) Reset() error { return nil } - -type backoff time.Duration - -func (b *backoff) wait(ctx context.Context) error { - if *b == 0 { - *b = backoff(time.Millisecond) - } - select { - case <-time.After(time.Duration(*b)): - if time.Duration(*b) < time.Second*10 { - *b <<= 1 - } - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (b backoff) String() string { - return time.Duration(b).String() -} diff --git a/vendor/github.com/kurin/blazer/b2/readerat.go b/vendor/github.com/kurin/blazer/b2/readerat.go deleted file mode 100644 index 84ee982bb..000000000 --- a/vendor/github.com/kurin/blazer/b2/readerat.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package b2 - -import ( - "io" - "sync" -) - -type readerAt struct { - rs io.ReadSeeker - mu sync.Mutex -} - -func (r *readerAt) ReadAt(p []byte, off int64) (int, error) { - r.mu.Lock() - defer r.mu.Unlock() - - // ReadAt is supposed to preserve the offset. - cur, err := r.rs.Seek(0, io.SeekCurrent) - if err != nil { - return 0, err - } - defer r.rs.Seek(cur, io.SeekStart) - - if _, err := r.rs.Seek(off, io.SeekStart); err != nil { - return 0, err - } - return io.ReadFull(r.rs, p) -} - -// wraps a ReadSeeker in a mutex to provide a ReaderAt. How is this not in the -// io package? -func enReaderAt(rs io.ReadSeeker) io.ReaderAt { - return &readerAt{rs: rs} -} diff --git a/vendor/github.com/kurin/blazer/b2/writer.go b/vendor/github.com/kurin/blazer/b2/writer.go deleted file mode 100644 index cce60e726..000000000 --- a/vendor/github.com/kurin/blazer/b2/writer.go +++ /dev/null @@ -1,613 +0,0 @@ -// Copyright 2016, the Blazer authors - -// - -// Licensed under the Apache License, Version 2.0 (the "License"); - -// you may not use this file except in compliance with the License. - -// You may obtain a copy of the License at - -// - -// http://www.apache.org/licenses/LICENSE-2.0 - -// - -// Unless required by applicable law or agreed to in writing, software - -// distributed under the License is distributed on an "AS IS" BASIS, - -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - -// See the License for the specific language governing permissions and - -// limitations under the License. - -package b2 - -import ( - "context" - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" - - "github.com/kurin/blazer/internal/blog" -) - -// Writer writes data into Backblaze. It automatically switches to the large -// file API if the file exceeds ChunkSize bytes. Due to that and other -// Backblaze API details, there is a large buffer. -// -// Changes to public Writer attributes must be made before the first call to -// Write. -type Writer struct { - // ConcurrentUploads is the number of different threads sending data concurrently - // to Backblaze for large files. This can increase performance greatly, as - // each thread will hit a different endpoint. However, there is a ChunkSize - // buffer for each thread. Values less than 1 are equivalent to 1. - ConcurrentUploads int - - // Resume an upload. If true, and the upload is a large file, and a file of - // the same name was started but not finished, then assume that we are - // resuming that file, and don't upload duplicate chunks. - Resume bool - - // ChunkSize is the size, in bytes, of each individual part, when writing - // large files, and also when determining whether to upload a file normally - // or when to split it into parts. The default is 100M (1e8). The minimum is - // 5M (5e6); values less than this are not an error, but uploads using them - // will fail. The maximum is 5GB (5e9). - ChunkSize int - - // UseFileBuffer controls whether to use an in-memory buffer (the default) or - // scratch space on the file system. If this is true, b2 will save chunks in - // FileBufferDir. - UseFileBuffer bool - - // FileBufferDir specifies the directory where scratch files are kept. If - // blank, os.TempDir() is used.
- FileBufferDir string - - contentType string - info map[string]string - - csize int - ctx context.Context - cancel context.CancelFunc // cancels ctx - ctxf func() context.Context - errf func(error) - ready chan chunk - cdone chan struct{} - wg sync.WaitGroup - start sync.Once - once sync.Once - done sync.Once - file beLargeFileInterface - seen map[int]string - everStarted bool - newBuffer func() (writeBuffer, error) - - o *Object - name string - - cidx int - w writeBuffer - - emux sync.RWMutex - err error - - smux sync.RWMutex - smap map[int]*meteredReader -} - -type chunk struct { - id int - buf writeBuffer -} - -func (w *Writer) setErr(err error) { - if err == nil || err == io.EOF { - return - } - w.emux.Lock() - defer w.emux.Unlock() - if w.err != nil { - return - } - blog.V(1).Infof("error writing %s: %v", w.name, err) - w.err = err - w.cancel() - if w.ctxf == nil { - return - } - if w.errf == nil { - w.errf = func(error) {} - } - w.errf(w.file.cancel(w.ctxf())) -} - -func (w *Writer) getErr() error { - w.emux.RLock() - defer w.emux.RUnlock() - return w.err -} - -func (w *Writer) registerChunk(id int, r *meteredReader) { - w.smux.Lock() - w.smap[id] = r - w.smux.Unlock() -} - -func (w *Writer) completeChunk(id int) { - w.smux.Lock() - w.smap[id] = nil - w.smux.Unlock() -} - -var gid int32 - -func sleepCtx(ctx context.Context, d time.Duration) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(d): - return nil - } -} - -func (w *Writer) thread() { - w.wg.Add(1) - go func() { - defer w.wg.Done() - id := atomic.AddInt32(&gid, 1) - fc, err := w.file.getUploadPartURL(w.ctx) - if err != nil { - w.setErr(err) - return - } - for { - var cnk chunk - select { - case cnk = <-w.ready: - case <-w.cdone: - return - } - if sha, ok := w.seen[cnk.id]; ok { - if sha != cnk.buf.Hash() { - w.setErr(errors.New("resumable upload was requested, but chunks don't match")) - return - } - cnk.buf.Close() - w.completeChunk(cnk.id) - blog.V(2).Infof("skipping chunk %d", cnk.id) - continue - } - blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id) - r, err := cnk.buf.Reader() - if err != nil { - w.setErr(err) - return - } - mr := &meteredReader{r: r, size: cnk.buf.Len()} - w.registerChunk(cnk.id, mr) - sleep := time.Millisecond * 15 - redo: - n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id) - if n != cnk.buf.Len() || err != nil { - if w.o.b.r.reupload(err) { - if err := sleepCtx(w.ctx, sleep); err != nil { - w.setErr(err) - w.completeChunk(cnk.id) - cnk.buf.Close() // TODO: log error - } - sleep *= 2 - if sleep > time.Second*15 { - sleep = time.Second * 15 - } - blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, cnk.buf.Len(), err) - f, err := w.file.getUploadPartURL(w.ctx) - if err != nil { - w.setErr(err) - w.completeChunk(cnk.id) - cnk.buf.Close() // TODO: log error - return - } - fc = f - goto redo - } - w.setErr(err) - w.completeChunk(cnk.id) - cnk.buf.Close() // TODO: log error - return - } - w.completeChunk(cnk.id) - cnk.buf.Close() // TODO: log error - blog.V(2).Infof("chunk %d handled", cnk.id) - } - }() -} - -func (w *Writer) init() { - w.start.Do(func() { - w.everStarted = true - w.smux.Lock() - w.smap = make(map[int]*meteredReader) - w.smux.Unlock() - w.o.b.c.addWriter(w) - w.csize = w.ChunkSize - if w.csize == 0 { - w.csize = 1e8 - } - if w.newBuffer == nil { - w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } - if w.UseFileBuffer { - w.newBuffer = func() (writeBuffer, error) { return 
newFileBuffer(w.FileBufferDir) } - } - } - v, err := w.newBuffer() - if err != nil { - w.setErr(err) - return - } - w.w = v - }) -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - w.init() - if err := w.getErr(); err != nil { - return 0, err - } - left := w.csize - w.w.Len() - if len(p) < left { - return w.w.Write(p) - } - i, err := w.w.Write(p[:left]) - if err != nil { - w.setErr(err) - return i, err - } - if err := w.sendChunk(); err != nil { - w.setErr(err) - return i, w.getErr() - } - k, err := w.Write(p[left:]) - if err != nil { - w.setErr(err) - } - return i + k, err -} - -func (w *Writer) getUploadURL(ctx context.Context) (beURLInterface, error) { - u := w.o.b.urlPool.get() - if u == nil { - return w.o.b.b.getUploadURL(w.ctx) - } - - return u, nil -} - -func (w *Writer) simpleWriteFile() error { - ue, err := w.getUploadURL(w.ctx) - if err != nil { - return err - } - // This defer needs to be in a func() so that we put whatever the value of ue - // is at function exit. - defer func() { w.o.b.urlPool.put(ue) }() - sha1 := w.w.Hash() - ctype := w.contentType - if ctype == "" { - ctype = "application/octet-stream" - } - r, err := w.w.Reader() - if err != nil { - return err - } - mr := &meteredReader{r: r, size: w.w.Len()} - w.registerChunk(1, mr) - defer w.completeChunk(1) -redo: - f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info) - if err != nil { - if w.o.b.r.reupload(err) { - blog.V(2).Infof("b2 writer: %v; retrying", err) - u, err := w.o.b.b.getUploadURL(w.ctx) - if err != nil { - return err - } - ue = u - goto redo - } - return err - } - w.o.f = f - return nil -} - -func (w *Writer) getLargeFile() (beLargeFileInterface, error) { - if !w.Resume { - ctype := w.contentType - if ctype == "" { - ctype = "application/octet-stream" - } - return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info) - } - var got bool - iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished()) - var fi beFileInterface - for iter.Next() { - obj := iter.Object() - if obj.Name() == w.name { - got = true - fi = obj.f - } - } - if iter.Err() != nil { - return nil, iter.Err() - } - if !got { - w.Resume = false - return w.getLargeFile() - } - - next := 1 - seen := make(map[int]string) - var size int64 - for { - parts, n, err := fi.listParts(w.ctx, next, 100) - if err != nil { - return nil, err - } - next = n - for _, p := range parts { - seen[p.number()] = p.sha1() - size += p.size() - } - if len(parts) == 0 { - break - } - if next == 0 { - break - } - } - w.seen = make(map[int]string) // copy the map - for id, sha := range seen { - w.seen[id] = sha - } - return fi.compileParts(size, seen), nil -} - -func (w *Writer) sendChunk() error { - var err error - w.once.Do(func() { - lf, e := w.getLargeFile() - if e != nil { - err = e - return - } - w.file = lf - w.ready = make(chan chunk) - w.cdone = make(chan struct{}) - if w.ConcurrentUploads < 1 { - w.ConcurrentUploads = 1 - } - for i := 0; i < w.ConcurrentUploads; i++ { - w.thread() - } - }) - if err != nil { - return err - } - select { - case <-w.cdone: - return nil - case w.ready <- chunk{ - id: w.cidx + 1, - buf: w.w, - }: - case <-w.ctx.Done(): - return w.ctx.Err() - } - w.cidx++ - v, err := w.newBuffer() - if err != nil { - return err - } - w.w = v - return nil -} - -// ReadFrom reads all of r into w, returning the first error or no error if r -// returns io.EOF. 
If r is also an io.Seeker, ReadFrom will stream r directly -// over the wire instead of buffering it locally. This reduces memory usage. -// -// Do not issue multiple calls to ReadFrom, or mix ReadFrom and Write. If you -// have multiple readers you want to concatenate into the same B2 object, use -// an io.MultiReader. -// -// Note that io.Copy will automatically choose to use ReadFrom. -// -// ReadFrom currently doesn't handle w.Resume; if w.Resume is true, ReadFrom -// will act as if r is not an io.Seeker. -func (w *Writer) ReadFrom(r io.Reader) (int64, error) { - rs, ok := r.(io.ReadSeeker) - if !ok || w.Resume { - return copyContext(w.ctx, w, r) - } - blog.V(2).Info("streaming without buffer") - size, err := rs.Seek(0, io.SeekEnd) - if err != nil { - return 0, err - } - var ra io.ReaderAt - if rat, ok := r.(io.ReaderAt); ok { - ra = rat - } else { - ra = enReaderAt(rs) - } - var offset int64 - var wrote int64 - w.newBuffer = func() (writeBuffer, error) { - left := size - offset - if left <= 0 { - // We're done sending real chunks; send empty chunks from now on so that - // Close() works. - w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } - w.w = newMemoryBuffer() - return nil, io.EOF - } - csize := int64(w.csize) - if left < csize { - csize = left - } - nb := newNonBuffer(ra, offset, csize) - wrote += csize // TODO: this is kind of a total lie - offset += csize - return nb, nil - } - w.init() - if size < int64(w.csize) { - // the magic happens on w.Close() - return size, nil - } - for { - if err := w.sendChunk(); err != nil { - if err != io.EOF { - return wrote, err - } - return wrote, nil - } - } -} - -// Close satisfies the io.Closer interface. It is critical to check the return -// value of Close for all writers. -func (w *Writer) Close() error { - w.done.Do(func() { - if !w.everStarted { - w.init() - w.setErr(w.simpleWriteFile()) - return - } - defer w.o.b.c.removeWriter(w) - defer func() { - if err := w.w.Close(); err != nil { - // this is non-fatal, but alarming - blog.V(1).Infof("close %s: %v", w.name, err) - } - }() - if w.cidx == 0 { - w.setErr(w.simpleWriteFile()) - return - } - if w.w.Len() > 0 { - if err := w.sendChunk(); err != nil { - w.setErr(err) - return - } - } - // See https://github.com/kurin/blazer/issues/60 for why we use a special - // channel for this. - close(w.cdone) - w.wg.Wait() - f, err := w.file.finishLargeFile(w.ctx) - if err != nil { - w.setErr(err) - return - } - w.o.f = f - }) - return w.getErr() -} - -// WithAttrs sets the writable attributes of the resulting file to given -// values. WithAttrs must be called before the first call to Write. -// -// DEPRECATED: Use WithAttrsOption instead. -func (w *Writer) WithAttrs(attrs *Attrs) *Writer { - w.contentType = attrs.ContentType - w.info = make(map[string]string) - for k, v := range attrs.Info { - w.info[k] = v - } - if len(w.info) < 10 && attrs.SHA1 != "" { - w.info["large_file_sha1"] = attrs.SHA1 - } - if len(w.info) < 10 && !attrs.LastModified.IsZero() { - w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6) - } - return w -} - -// A WriterOption sets Writer-specific behavior. -type WriterOption func(*Writer) - -// WithAttrs attaches the given Attrs to the writer. -func WithAttrsOption(attrs *Attrs) WriterOption { - return func(w *Writer) { - w.WithAttrs(attrs) - } -} - -// WithCancelOnError requests the writer, if it has started a large file -// upload, to call b2_cancel_large_file on any permanent error. 
It calls ctxf -// to obtain a context with which to cancel the file; this is to allow callers -// to set specific timeouts. If errf is non-nil, then it is called with the -// (possibly nil) output of b2_cancel_large_file. -func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption { - return func(w *Writer) { - w.ctxf = ctxf - w.errf = errf - } -} - -// DefaultWriterOptions returns a ClientOption that will apply the given -// WriterOptions to every Writer. These options can be overridden by passing -// new options to NewWriter. -func DefaultWriterOptions(opts ...WriterOption) ClientOption { - return func(c *clientOptions) { - c.writerOpts = opts - } -} - -func (w *Writer) status() *WriterStatus { - w.smux.RLock() - defer w.smux.RUnlock() - - ws := &WriterStatus{ - Progress: make([]float64, len(w.smap)), - } - - for i := 1; i <= len(w.smap); i++ { - ws.Progress[i-1] = w.smap[i].done() - } - - return ws -} - -type meteredReader struct { - read int64 - size int - r readResetter - mux sync.Mutex -} - -func (mr *meteredReader) Read(p []byte) (int, error) { - mr.mux.Lock() - defer mr.mux.Unlock() - n, err := mr.r.Read(p) - mr.read += int64(n) - return n, err -} - -func (mr *meteredReader) Reset() error { - mr.mux.Lock() - defer mr.mux.Unlock() - mr.read = 0 - return mr.r.Reset() -} - -func (mr *meteredReader) done() float64 { - if mr == nil { - return 1 - } - read := float64(atomic.LoadInt64(&mr.read)) - return read / float64(mr.size) -} diff --git a/vendor/github.com/kurin/blazer/base/base.go b/vendor/github.com/kurin/blazer/base/base.go deleted file mode 100644 index 8ca095051..000000000 --- a/vendor/github.com/kurin/blazer/base/base.go +++ /dev/null @@ -1,1298 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package base provides a very low-level interface on top of the B2 v1 API. -// It is not intended to be used directly. -// -// It currently lacks support for the following APIs: -// -// b2_download_file_by_id -package base - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/kurin/blazer/internal/b2types" - "github.com/kurin/blazer/internal/blog" -) - -const ( - APIBase = "https://api.backblazeb2.com" - DefaultUserAgent = "blazer/0.5.3" -) - -type b2err struct { - msg string - method string - retry int - code int -} - -func (e b2err) Error() string { - if e.method == "" { - return fmt.Sprintf("b2 error: %s", e.msg) - } - return fmt.Sprintf("%s: %d: %s", e.method, e.code, e.msg) -} - -// Action checks an error and returns a recommended course of action. 
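The dispatch implied by the comment above, sketched from a caller's side; refreshAuth and newUploadURL stand in for the caller's own wrappers around AuthorizeAccount and GetUploadURL/GetUploadPartURL:

	switch base.Action(err) {
	case base.ReAuthenticate:
		err = refreshAuth(ctx) // call AuthorizeAccount again
	case base.AttemptNewUpload:
		err = newUploadURL(ctx) // request a fresh upload URL and token
	case base.Retry:
		time.Sleep(base.Backoff(err)) // see Backoff below for the zero case
	case base.Punt:
		return err // nothing mechanical left to do; surface it
	}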
-func Action(err error) ErrAction { - e, ok := err.(b2err) - if !ok { - return Punt - } - if e.retry > 0 { - return Retry - } - if e.code >= 500 && e.code < 600 && (e.method == "b2_upload_file" || e.method == "b2_upload_part") { - return AttemptNewUpload - } - switch e.code { - case 401: - switch e.method { - case "b2_authorize_account": - return Punt - case "b2_upload_file", "b2_upload_part": - return AttemptNewUpload - } - return ReAuthenticate - case 400: - // See restic/restic#1207 - if e.method == "b2_upload_file" && strings.HasPrefix(e.msg, "more than one upload using auth token") { - return AttemptNewUpload - } - return Punt - case 408: - return AttemptNewUpload - case 429, 500, 503: - return Retry - } - return Punt -} - -// ErrAction is an action that a caller can take when any function returns an -// error. -type ErrAction int - -// Code returns the error code and message. -func Code(err error) (int, string) { - e, ok := err.(b2err) - if !ok { - return 0, "" - } - return e.code, e.msg -} - -const ( - // ReAuthenticate indicates that the B2 account authentication tokens have - // expired, and should be refreshed with a new call to AuthorizeAccount. - ReAuthenticate ErrAction = iota - - // AttemptNewUpload indicates that an upload's authentication token (or URL - // endpoint) has expired, and that users should request new ones with a call - // to GetUploadURL or GetUploadPartURL. - AttemptNewUpload - - // Retry indicates that the caller should wait an appropriate amount of time, - // and then reattempt the RPC. - Retry - - // Punt means that there is no useful action to be taken on this error, and - // that it should be displayed to the user. - Punt -) - -func mkErr(resp *http.Response) error { - data, err := ioutil.ReadAll(resp.Body) - var msgBody string - if err != nil { - msgBody = fmt.Sprintf("couldn't read message body: %v", err) - } - logResponse(resp, data) - msg := &b2types.ErrorMessage{} - if err := json.Unmarshal(data, msg); err != nil { - if msgBody != "" { - msgBody = fmt.Sprintf("couldn't read message body: %v", err) - } - } - if msgBody == "" { - msgBody = msg.Msg - } - var retryAfter int - retry := resp.Header.Get("Retry-After") - if retry != "" { - r, err := strconv.ParseInt(retry, 10, 64) - if err != nil { - r = 0 - blog.V(1).Infof("couldn't parse retry-after header %q: %v", retry, err) - } - retryAfter = int(r) - } - return b2err{ - msg: msgBody, - retry: retryAfter, - code: resp.StatusCode, - method: resp.Request.Header.Get("X-Blazer-Method"), - } -} - -// Backoff returns an appropriate amount of time to wait, given an error, if -// any was returned by the server. If the return value is 0, but Action -// indicates Retry, the user should implement their own exponential backoff, -// beginning with one second. 
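For illustration, a caller outside this package could drive retries from Action, Backoff and Code roughly as below. This is a sketch: doRPC stands in for any wrapped B2 call, and the doubling one-second fallback is the caller's own policy (the Backoff comment above asks callers to supply exponential backoff when no Retry-After was sent).

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kurin/blazer/base"
)

func callWithRetry(ctx context.Context, doRPC func() error) error {
	wait := time.Second // grown on each retry when the server sends no hint
	for {
		err := doRPC()
		if err == nil {
			return nil
		}
		switch base.Action(err) {
		case base.Retry:
			if d := base.Backoff(err); d > 0 {
				wait = d // server sent Retry-After
			}
			select {
			case <-time.After(wait):
				wait *= 2
			case <-ctx.Done():
				return ctx.Err()
			}
		case base.ReAuthenticate, base.AttemptNewUpload:
			// A real caller would refresh the account token or upload URL
			// here and loop; this sketch just surfaces the error.
			return err
		default: // Punt: show the error to the user
			code, msg := base.Code(err)
			return fmt.Errorf("b2 error %d: %s", code, msg)
		}
	}
}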
-func Backoff(err error) time.Duration { - e, ok := err.(b2err) - if !ok { - return 0 - } - return time.Duration(e.retry) * time.Second -} - -func logRequest(req *http.Request, args []byte) { - if !blog.V(2) { - return - } - var headers []string - for k, v := range req.Header { - if k == "Authorization" || k == "X-Blazer-Method" { - continue - } - headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) - } - hstr := strings.Join(headers, ";") - method := req.Header.Get("X-Blazer-Method") - if args != nil { - blog.V(2).Infof(">> %s uri: %v headers: {%s} args: (%s)", method, req.URL, hstr, string(args)) - return - } - blog.V(2).Infof(">> %s uri: %v {%s} (no args)", method, req.URL, hstr) -} - -var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) - -func logResponse(resp *http.Response, reply []byte) { - if !blog.V(2) { - return - } - var headers []string - for k, v := range resp.Header { - headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) - } - hstr := strings.Join(headers, "; ") - method := resp.Request.Header.Get("X-Blazer-Method") - id := resp.Request.Header.Get("X-Blazer-Request-ID") - if reply != nil { - safe := string(authRegexp.ReplaceAll(reply, []byte(`"authorizationToken": "[redacted]"`))) - blog.V(2).Infof("<< %s (%s) %s {%s} (%s)", method, id, resp.Status, hstr, safe) - return - } - blog.V(2).Infof("<< %s (%s) %s {%s} (no reply)", method, id, resp.Status, hstr) -} - -func millitime(t int64) time.Time { - return time.Unix(t/1000, t%1000*1e6) -} - -type b2Options struct { - transport http.RoundTripper - failSomeUploads bool - expireTokens bool - capExceeded bool - apiBase string - userAgent string -} - -func (o *b2Options) addHeaders(req *http.Request) { - if o.failSomeUploads { - req.Header.Add("X-Bz-Test-Mode", "fail_some_uploads") - } - if o.expireTokens { - req.Header.Add("X-Bz-Test-Mode", "expire_some_account_authorization_tokens") - } - if o.capExceeded { - req.Header.Add("X-Bz-Test-Mode", "force_cap_exceeded") - } - req.Header.Set("User-Agent", o.getUserAgent()) -} - -func (o *b2Options) getAPIBase() string { - if o.apiBase != "" { - return o.apiBase - } - return APIBase -} - -func (o *b2Options) getUserAgent() string { - if o.userAgent != "" { - return fmt.Sprintf("%s %s", o.userAgent, DefaultUserAgent) - } - return DefaultUserAgent -} - -func (o *b2Options) getTransport() http.RoundTripper { - if o.transport == nil { - return http.DefaultTransport - } - return o.transport -} - -// B2 holds account information for Backblaze. -type B2 struct { - accountID string - authToken string - apiURI string - downloadURI string - minPartSize int - opts *b2Options - bucket string // restricted to this bucket if present - pfx string // restricted to objects with this prefix if present -} - -// Update replaces the B2 object with a new one, in-place. 
-func (b *B2) Update(n *B2) { - b.accountID = n.accountID - b.authToken = n.authToken - b.apiURI = n.apiURI - b.downloadURI = n.downloadURI - b.minPartSize = n.minPartSize - b.opts = n.opts -} - -type httpReply struct { - resp *http.Response - err error -} - -func makeNetRequest(ctx context.Context, req *http.Request, rt http.RoundTripper) (*http.Response, error) { - req = req.WithContext(ctx) - resp, err := rt.RoundTrip(req) - switch err { - case nil: - return resp, nil - case context.Canceled, context.DeadlineExceeded: - return nil, err - default: - method := req.Header.Get("X-Blazer-Method") - blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, err) - return nil, b2err{ - msg: err.Error(), - retry: 1, - } - } -} - -type requestBody struct { - size int64 - body io.Reader -} - -func (rb *requestBody) getSize() int64 { - if rb == nil { - return 0 - } - return rb.size -} - -func (rb *requestBody) getBody() io.Reader { - if rb == nil { - return nil - } - if rb.getSize() == 0 { - // https://github.com/kurin/blazer/issues/57 - // When body is non-nil, but the request's ContentLength is 0, it is - // replaced with -1, which causes the client to send a chunked encoding, - // which confuses B2. - return http.NoBody - } - return rb.body -} - -type keepFinalBytes struct { - r io.Reader - remain int - sha [40]byte -} - -func (k *keepFinalBytes) Read(p []byte) (int, error) { - n, err := k.r.Read(p) - if k.remain-n > 40 { - k.remain -= n - return n, err - } - // This was a whole lot harder than it looks. - pi := -40 + k.remain - if pi < 0 { - pi = 0 - } - pe := n - ki := 40 - k.remain - if ki < 0 { - ki = 0 - } - ke := n - k.remain + 40 - copy(k.sha[ki:ke], p[pi:pe]) - k.remain -= n - return n, err -} - -var reqID int64 - -func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error { - var args []byte - if b2req != nil { - enc, err := json.Marshal(b2req) - if err != nil { - return err - } - args = enc - body = &requestBody{ - body: bytes.NewBuffer(enc), - size: int64(len(enc)), - } - } - req, err := http.NewRequest(verb, uri, body.getBody()) - if err != nil { - return err - } - req.ContentLength = body.getSize() - for k, v := range headers { - if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") { - v = escape(v) - } - req.Header.Set(k, v) - } - req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) - req.Header.Set("X-Blazer-Method", method) - o.addHeaders(req) - logRequest(req, args) - resp, err := makeNetRequest(ctx, req, o.getTransport()) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return mkErr(resp) - } - var replyArgs []byte - if b2resp != nil { - rbuf := &bytes.Buffer{} - r := io.TeeReader(resp.Body, rbuf) - decoder := json.NewDecoder(r) - if err := decoder.Decode(b2resp); err != nil { - return err - } - replyArgs = rbuf.Bytes() - } else { - ra, err := ioutil.ReadAll(resp.Body) - if err != nil { - blog.V(1).Infof("%s: couldn't read response: %v", method, err) - } - replyArgs = ra - } - logResponse(resp, replyArgs) - return nil -} - -// AuthorizeAccount wraps b2_authorize_account. 
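keepFinalBytes above exists for the "hex_digits_at_end" upload mode: it passes every read through untouched while capturing the trailing 40 hex digits of the stream. An in-package, test-style sketch of that behavior (not part of the original file):

package base

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
)

func demoKeepFinalBytes() {
	payload := bytes.Repeat([]byte{0xa5}, 100)
	trailer := fmt.Sprintf("%x", sha1.Sum(payload)) // 40 hex digits
	body := append(payload, trailer...)

	k := &keepFinalBytes{r: bytes.NewReader(body), remain: len(body)}
	if _, err := io.Copy(ioutil.Discard, k); err != nil { // chunked reads are handled
		panic(err)
	}
	fmt.Println(string(k.sha[:]) == trailer) // true: the SHA1 trailer was captured
}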
-func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOption) (*B2, error) { - auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", account, key))) - b2resp := &b2types.AuthorizeAccountResponse{} - headers := map[string]string{ - "Authorization": fmt.Sprintf("Basic %s", auth), - } - b2opts := &b2Options{} - for _, f := range opts { - f(b2opts) - } - if err := b2opts.makeRequest(ctx, "b2_authorize_account", "GET", b2opts.getAPIBase()+b2types.V1api+"b2_authorize_account", nil, b2resp, headers, nil); err != nil { - return nil, err - } - return &B2{ - accountID: b2resp.AccountID, - authToken: b2resp.AuthToken, - apiURI: b2resp.URI, - downloadURI: b2resp.DownloadURI, - minPartSize: b2resp.PartSize, - bucket: b2resp.Allowed.Bucket, - pfx: b2resp.Allowed.Prefix, - opts: b2opts, - }, nil -} - -// An AuthOption allows callers to choose per-session settings. -type AuthOption func(*b2Options) - -// UserAgent sets the User-Agent HTTP header. The default header is -// "blazer/<version>"; the value set here will be prepended to that. This can -// be set multiple times. -func UserAgent(agent string) AuthOption { - return func(o *b2Options) { - if o.userAgent == "" { - o.userAgent = agent - return - } - o.userAgent = fmt.Sprintf("%s %s", agent, o.userAgent) - } -} - -// Transport returns an AuthOption that sets the underlying HTTP mechanism. -func Transport(rt http.RoundTripper) AuthOption { - return func(o *b2Options) { - o.transport = rt - } -} - -// FailSomeUploads requests intermittent upload failures from the B2 service. -// This is mostly useful for testing. -func FailSomeUploads() AuthOption { - return func(o *b2Options) { - o.failSomeUploads = true - } -} - -// ExpireSomeAuthTokens requests intermittent authentication failures from the -// B2 service. -func ExpireSomeAuthTokens() AuthOption { - return func(o *b2Options) { - o.expireTokens = true - } -} - -// ForceCapExceeded requests a cap limit from the B2 service. This causes all -// uploads to be treated as if they would exceed the configured B2 capacity. -func ForceCapExceeded() AuthOption { - return func(o *b2Options) { - o.capExceeded = true - } -} - -// SetAPIBase returns an AuthOption that uses the given URL as the base for API -// requests. -func SetAPIBase(url string) AuthOption { - return func(o *b2Options) { - o.apiBase = url - } -} - -type LifecycleRule struct { - Prefix string - DaysNewUntilHidden int - DaysHiddenUntilDeleted int -} - -// CreateBucket wraps b2_create_bucket.
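A short sketch of authorizing with a couple of these options; the credential strings are placeholders.

package main

import (
	"context"
	"net/http"

	"github.com/kurin/blazer/base"
)

func authorize(ctx context.Context) (*base.B2, error) {
	return base.AuthorizeAccount(ctx, "accountID", "applicationKey",
		base.UserAgent("my-tool/1.2"),         // sent as "my-tool/1.2 blazer/0.5.3"
		base.Transport(http.DefaultTransport), // or any instrumented RoundTripper
	)
}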
-func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (*Bucket, error) { - if btype != "allPublic" { - btype = "allPrivate" - } - var b2rules []b2types.LifecycleRule - for _, rule := range rules { - b2rules = append(b2rules, b2types.LifecycleRule{ - Prefix: rule.Prefix, - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - }) - } - b2req := &b2types.CreateBucketRequest{ - AccountID: b.accountID, - Name: name, - Type: btype, - Info: info, - LifecycleRules: b2rules, - } - b2resp := &b2types.CreateBucketResponse{} - headers := map[string]string{ - "Authorization": b.authToken, - } - if err := b.opts.makeRequest(ctx, "b2_create_bucket", "POST", b.apiURI+b2types.V1api+"b2_create_bucket", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - var respRules []LifecycleRule - for _, rule := range b2resp.LifecycleRules { - respRules = append(respRules, LifecycleRule{ - Prefix: rule.Prefix, - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - }) - } - return &Bucket{ - Name: name, - Info: b2resp.Info, - LifecycleRules: respRules, - ID: b2resp.BucketID, - rev: b2resp.Revision, - b2: b, - }, nil -} - -// DeleteBucket wraps b2_delete_bucket. -func (b *Bucket) DeleteBucket(ctx context.Context) error { - b2req := &b2types.DeleteBucketRequest{ - AccountID: b.b2.accountID, - BucketID: b.ID, - } - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - return b.b2.opts.makeRequest(ctx, "b2_delete_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_delete_bucket", b2req, nil, headers, nil) -} - -// Bucket holds B2 bucket details. -type Bucket struct { - Name string - Type string - Info map[string]string - LifecycleRules []LifecycleRule - ID string - rev int - b2 *B2 -} - -// Update wraps b2_update_bucket. -func (b *Bucket) Update(ctx context.Context) (*Bucket, error) { - var rules []b2types.LifecycleRule - for _, rule := range b.LifecycleRules { - rules = append(rules, b2types.LifecycleRule{ - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - Prefix: rule.Prefix, - }) - } - b2req := &b2types.UpdateBucketRequest{ - AccountID: b.b2.accountID, - BucketID: b.ID, - // Name: b.Name, - Type: b.Type, - Info: b.Info, - LifecycleRules: rules, - IfRevisionIs: b.rev, - } - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - b2resp := &b2types.UpdateBucketResponse{} - if err := b.b2.opts.makeRequest(ctx, "b2_update_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_update_bucket", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - var respRules []LifecycleRule - for _, rule := range b2resp.LifecycleRules { - respRules = append(respRules, LifecycleRule{ - Prefix: rule.Prefix, - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - }) - } - return &Bucket{ - Name: b.Name, - Type: b2resp.Type, - Info: b2resp.Info, - LifecycleRules: respRules, - ID: b2resp.BucketID, - b2: b.b2, - }, nil -} - -// BaseURL returns the base part of the download URLs. -func (b *Bucket) BaseURL() string { - return b.b2.downloadURI -} - -// ListBuckets wraps b2_list_buckets. 
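Sketch of creating a private bucket with one lifecycle rule using the types above; the bucket name, prefix, and day counts are placeholders.

package main

import (
	"context"

	"github.com/kurin/blazer/base"
)

func makeBucket(ctx context.Context, b2 *base.B2) (*base.Bucket, error) {
	// Objects under tmp/ are hidden after 7 days and deleted 30 days later.
	return b2.CreateBucket(ctx, "my-bucket", "allPrivate", nil, []base.LifecycleRule{{
		Prefix:                 "tmp/",
		DaysNewUntilHidden:     7,
		DaysHiddenUntilDeleted: 30,
	}})
}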
-func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) { - b2req := &b2types.ListBucketsRequest{ - AccountID: b.accountID, - Bucket: b.bucket, - } - b2resp := &b2types.ListBucketsResponse{} - headers := map[string]string{ - "Authorization": b.authToken, - } - if err := b.opts.makeRequest(ctx, "b2_list_buckets", "POST", b.apiURI+b2types.V1api+"b2_list_buckets", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - var buckets []*Bucket - for _, bucket := range b2resp.Buckets { - var rules []LifecycleRule - for _, rule := range bucket.LifecycleRules { - rules = append(rules, LifecycleRule{ - Prefix: rule.Prefix, - DaysNewUntilHidden: rule.DaysNewUntilHidden, - DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, - }) - } - buckets = append(buckets, &Bucket{ - Name: bucket.Name, - Type: bucket.Type, - Info: bucket.Info, - LifecycleRules: rules, - ID: bucket.BucketID, - rev: bucket.Revision, - b2: b, - }) - } - return buckets, nil -} - -// URL holds information from the b2_get_upload_url API. -type URL struct { - uri string - token string - b2 *B2 - bucket *Bucket -} - -// Reload reloads URL in-place, by reissuing a b2_get_upload_url and -// overwriting the previous values. -func (url *URL) Reload(ctx context.Context) error { - n, err := url.bucket.GetUploadURL(ctx) - if err != nil { - return err - } - url.uri = n.uri - url.token = n.token - return nil -} - -// GetUploadURL wraps b2_get_upload_url. -func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { - b2req := &b2types.GetUploadURLRequest{ - BucketID: b.ID, - } - b2resp := &b2types.GetUploadURLResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_get_upload_url", "POST", b.b2.apiURI+b2types.V1api+"b2_get_upload_url", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &URL{ - uri: b2resp.URI, - token: b2resp.Token, - b2: b.b2, - bucket: b, - }, nil -} - -// File represents a B2 file. -type File struct { - Name string - Size int64 - Status string - Timestamp time.Time - Info *FileInfo - id string - b2 *B2 -} - -// File returns a bare File struct, but with the appropriate id and b2 -// interfaces. -func (b *Bucket) File(id, name string) *File { - return &File{id: id, b2: b.b2, Name: name} -} - -// UploadFile wraps b2_upload_file. -func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) { - headers := map[string]string{ - "Authorization": url.token, - "X-Bz-File-Name": name, - "Content-Type": contentType, - "Content-Length": fmt.Sprintf("%d", size), - "X-Bz-Content-Sha1": sha1, - } - for k, v := range info { - headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v - } - b2resp := &b2types.UploadFileResponse{} - if err := url.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", url.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil { - return nil, err - } - return &File{ - Name: name, - Size: int64(size), - Timestamp: millitime(b2resp.Timestamp), - Status: b2resp.Action, - id: b2resp.FileID, - b2: url.b2, - }, nil -} - -// DeleteFileVersion wraps b2_delete_file_version. 
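A sketch of the small-file path built from the two calls above: fetch an upload URL, then POST the bytes with their SHA1. A real caller would consult Action(err) on failure; AttemptNewUpload means fetch a fresh URL (or url.Reload) and try again.

package main

import (
	"bytes"
	"context"
	"crypto/sha1"
	"fmt"

	"github.com/kurin/blazer/base"
)

func putSmall(ctx context.Context, bucket *base.Bucket, name string, data []byte) (*base.File, error) {
	url, err := bucket.GetUploadURL(ctx)
	if err != nil {
		return nil, err
	}
	sum := fmt.Sprintf("%x", sha1.Sum(data)) // B2 verifies this on receipt
	return url.UploadFile(ctx, bytes.NewReader(data), len(data), name, "application/octet-stream", sum, nil)
}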
-func (f *File) DeleteFileVersion(ctx context.Context) error { - b2req := &b2types.DeleteFileVersionRequest{ - Name: f.Name, - FileID: f.id, - } - headers := map[string]string{ - "Authorization": f.b2.authToken, - } - return f.b2.opts.makeRequest(ctx, "b2_delete_file_version", "POST", f.b2.apiURI+b2types.V1api+"b2_delete_file_version", b2req, nil, headers, nil) -} - -// LargeFile holds information necessary to implement B2 large file support. -type LargeFile struct { - id string - b2 *B2 - - mu sync.Mutex - size int64 - hashes map[int]string -} - -// StartLargeFile wraps b2_start_large_file. -func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) { - b2req := &b2types.StartLargeFileRequest{ - BucketID: b.ID, - Name: name, - ContentType: contentType, - Info: info, - } - b2resp := &b2types.StartLargeFileResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_start_large_file", "POST", b.b2.apiURI+b2types.V1api+"b2_start_large_file", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &LargeFile{ - id: b2resp.ID, - b2: b.b2, - hashes: make(map[int]string), - }, nil -} - -// CancelLargeFile wraps b2_cancel_large_file. -func (l *LargeFile) CancelLargeFile(ctx context.Context) error { - b2req := &b2types.CancelLargeFileRequest{ - ID: l.id, - } - headers := map[string]string{ - "Authorization": l.b2.authToken, - } - return l.b2.opts.makeRequest(ctx, "b2_cancel_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_cancel_large_file", b2req, nil, headers, nil) -} - -// FilePart is a piece of a started, but not finished, large file upload. -type FilePart struct { - Number int - SHA1 string - Size int64 -} - -// ListParts wraps b2_list_parts. -func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { - b2req := &b2types.ListPartsRequest{ - ID: f.id, - Start: next, - Count: count, - } - b2resp := &b2types.ListPartsResponse{} - headers := map[string]string{ - "Authorization": f.b2.authToken, - } - if err := f.b2.opts.makeRequest(ctx, "b2_list_parts", "POST", f.b2.apiURI+b2types.V1api+"b2_list_parts", b2req, b2resp, headers, nil); err != nil { - return nil, 0, err - } - var parts []*FilePart - for _, part := range b2resp.Parts { - parts = append(parts, &FilePart{ - Number: part.Number, - SHA1: part.SHA1, - Size: part.Size, - }) - } - return parts, b2resp.Next, nil -} - -// CompileParts returns a LargeFile that can accept new data. Seen is a -// mapping of completed part numbers to SHA1 strings; size is the total size of -// all the completed parts to this point. -func (f *File) CompileParts(size int64, seen map[int]string) *LargeFile { - s := make(map[int]string) - for k, v := range seen { - s[k] = v - } - return &LargeFile{ - id: f.id, - b2: f.b2, - size: size, - hashes: s, - } -} - -// FileChunk holds information necessary for uploading file chunks. -type FileChunk struct { - url string - token string - file *LargeFile -} - -type getUploadPartURLRequest struct { - ID string `json:"fileId"` -} - -type getUploadPartURLResponse struct { - URL string `json:"uploadUrl"` - Token string `json:"authorizationToken"` -} - -// GetUploadPartURL wraps b2_get_upload_part_url. 
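Sketch of resuming an interrupted large file with the pieces above: ListParts recovers what the service already holds, and CompileParts rebuilds a LargeFile that new parts can be appended to.

package main

import (
	"context"

	"github.com/kurin/blazer/base"
)

func resumeLargeFile(ctx context.Context, f *base.File) (*base.LargeFile, error) {
	seen := make(map[int]string)
	var size int64
	next := 1 // part numbers start at 1
	for {
		parts, n, err := f.ListParts(ctx, next, 100)
		if err != nil {
			return nil, err
		}
		for _, p := range parts {
			seen[p.Number] = p.SHA1
			size += p.Size
		}
		if n == 0 || len(parts) == 0 { // no further pages
			break
		}
		next = n
	}
	return f.CompileParts(size, seen), nil
}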
-func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { - b2req := &getUploadPartURLRequest{ - ID: l.id, - } - b2resp := &getUploadPartURLResponse{} - headers := map[string]string{ - "Authorization": l.b2.authToken, - } - if err := l.b2.opts.makeRequest(ctx, "b2_get_upload_part_url", "POST", l.b2.apiURI+b2types.V1api+"b2_get_upload_part_url", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &FileChunk{ - url: b2resp.URL, - token: b2resp.Token, - file: l, - }, nil -} - -// Reload reloads FileChunk in-place. -func (fc *FileChunk) Reload(ctx context.Context) error { - n, err := fc.file.GetUploadPartURL(ctx) - if err != nil { - return err - } - fc.url = n.url - fc.token = n.token - return nil -} - -// UploadPart wraps b2_upload_part. -func (fc *FileChunk) UploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { - headers := map[string]string{ - "Authorization": fc.token, - "X-Bz-Part-Number": fmt.Sprintf("%d", index), - "Content-Length": fmt.Sprintf("%d", size), - "X-Bz-Content-Sha1": sha1, - } - if sha1 == "hex_digits_at_end" { - r = &keepFinalBytes{r: r, remain: size} - } - if err := fc.file.b2.opts.makeRequest(ctx, "b2_upload_part", "POST", fc.url, nil, nil, headers, &requestBody{body: r, size: int64(size)}); err != nil { - return 0, err - } - fc.file.mu.Lock() - if sha1 == "hex_digits_at_end" { - sha1 = string(r.(*keepFinalBytes).sha[:]) - } - fc.file.hashes[index] = sha1 - fc.file.size += int64(size) - fc.file.mu.Unlock() - return size, nil -} - -// FinishLargeFile wraps b2_finish_large_file. -func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) { - l.mu.Lock() - defer l.mu.Unlock() - b2req := &b2types.FinishLargeFileRequest{ - ID: l.id, - Hashes: make([]string, len(l.hashes)), - } - b2resp := &b2types.FinishLargeFileResponse{} - for k, v := range l.hashes { - if len(b2req.Hashes) < k { - return nil, fmt.Errorf("b2_finish_large_file: invalid index %d", k) - } - b2req.Hashes[k-1] = v - } - headers := map[string]string{ - "Authorization": l.b2.authToken, - } - if err := l.b2.opts.makeRequest(ctx, "b2_finish_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_finish_large_file", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &File{ - Name: b2resp.Name, - Size: l.size, - Timestamp: millitime(b2resp.Timestamp), - Status: b2resp.Action, - id: b2resp.FileID, - b2: l.b2, - }, nil -} - -// ListUnfinishedLargeFiles wraps b2_list_unfinished_large_files. -func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]*File, string, error) { - b2req := &b2types.ListUnfinishedLargeFilesRequest{ - BucketID: b.ID, - Continuation: continuation, - Count: count, - } - b2resp := &b2types.ListUnfinishedLargeFilesResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_list_unfinished_large_files", "POST", b.b2.apiURI+b2types.V1api+"b2_list_unfinished_large_files", b2req, b2resp, headers, nil); err != nil { - return nil, "", err - } - cont := b2resp.Continuation - var files []*File - for _, f := range b2resp.Files { - files = append(files, &File{ - Name: f.Name, - Timestamp: millitime(f.Timestamp), - b2: b.b2, - id: f.FileID, - Info: &FileInfo{ - Name: f.Name, - ContentType: f.ContentType, - Info: f.Info, - Timestamp: millitime(f.Timestamp), - }, - }) - } - return files, cont, nil -} - -// ListFileNames wraps b2_list_file_names. 
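The large-file flow these methods implement, end to end, looks roughly like the sketch below: start the file, upload fixed-size parts numbered from 1, then finish with the accumulated part SHA1s. Large files are meant for inputs spanning at least two parts; smaller inputs belong on the simple-upload path. Error handling is minimal here; real code would fc.Reload and retry per Action.

package main

import (
	"bytes"
	"context"
	"crypto/sha1"
	"fmt"
	"io"

	"github.com/kurin/blazer/base"
)

func putLarge(ctx context.Context, bucket *base.Bucket, name string, r io.Reader, partSize int) (*base.File, error) {
	lf, err := bucket.StartLargeFile(ctx, name, "application/octet-stream", nil)
	if err != nil {
		return nil, err
	}
	fc, err := lf.GetUploadPartURL(ctx)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, partSize)
	for i := 1; ; i++ { // part numbers start at 1
		n, rerr := io.ReadFull(r, buf)
		if n > 0 {
			sum := fmt.Sprintf("%x", sha1.Sum(buf[:n]))
			if _, err := fc.UploadPart(ctx, bytes.NewReader(buf[:n]), sum, n, i); err != nil {
				return nil, err
			}
		}
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			break // final (possibly short) part sent
		}
		if rerr != nil {
			return nil, rerr
		}
	}
	return lf.FinishLargeFile(ctx)
}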
-func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { - if prefix == "" { - prefix = b.b2.pfx - } - b2req := &b2types.ListFileNamesRequest{ - Count: count, - Continuation: continuation, - BucketID: b.ID, - Prefix: prefix, - Delimiter: delimiter, - } - b2resp := &b2types.ListFileNamesResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_list_file_names", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_names", b2req, b2resp, headers, nil); err != nil { - return nil, "", err - } - cont := b2resp.Continuation - var files []*File - for _, f := range b2resp.Files { - files = append(files, &File{ - Name: f.Name, - Size: f.Size, - Status: f.Action, - Timestamp: millitime(f.Timestamp), - Info: &FileInfo{ - Name: f.Name, - SHA1: f.SHA1, - Size: f.Size, - ContentType: f.ContentType, - Info: f.Info, - Status: f.Action, - Timestamp: millitime(f.Timestamp), - }, - id: f.FileID, - b2: b.b2, - }) - } - return files, cont, nil -} - -// ListFileVersions wraps b2_list_file_versions. -func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { - if prefix == "" { - prefix = b.b2.pfx - } - b2req := &b2types.ListFileVersionsRequest{ - BucketID: b.ID, - Count: count, - StartName: startName, - StartID: startID, - Prefix: prefix, - Delimiter: delimiter, - } - b2resp := &b2types.ListFileVersionsResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_list_file_versions", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_versions", b2req, b2resp, headers, nil); err != nil { - return nil, "", "", err - } - var files []*File - for _, f := range b2resp.Files { - files = append(files, &File{ - Name: f.Name, - Size: f.Size, - Status: f.Action, - Timestamp: millitime(f.Timestamp), - Info: &FileInfo{ - Name: f.Name, - SHA1: f.SHA1, - Size: f.Size, - ContentType: f.ContentType, - Info: f.Info, - Status: f.Action, - Timestamp: millitime(f.Timestamp), - }, - id: f.FileID, - b2: b.b2, - }) - } - return files, b2resp.NextName, b2resp.NextID, nil -} - -// GetDownloadAuthorization wraps b2_get_download_authorization. -func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration, contentDisposition string) (string, error) { - b2req := &b2types.GetDownloadAuthorizationRequest{ - BucketID: b.ID, - Prefix: prefix, - Valid: int(valid.Seconds()), - ContentDisposition: contentDisposition, - } - b2resp := &b2types.GetDownloadAuthorizationResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_get_download_authorization", "POST", b.b2.apiURI+b2types.V1api+"b2_get_download_authorization", b2req, b2resp, headers, nil); err != nil { - return "", err - } - return b2resp.Token, nil -} - -// FileReader is an io.ReadCloser that downloads a file from B2. -type FileReader struct { - io.ReadCloser - ContentLength int - ContentType string - SHA1 string - ID string - Info map[string]string -} - -func mkRange(offset, size int64) string { - if offset == 0 && size == 0 { - return "" - } - if size == 0 { - return fmt.Sprintf("bytes=%d-", offset) - } - return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) -} - -// DownloadFileByName wraps b2_download_file_by_name. 
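Sketch of walking every file name in a bucket with the continuation token returned above; an empty token means the listing is complete.

package main

import (
	"context"

	"github.com/kurin/blazer/base"
)

func listAll(ctx context.Context, bucket *base.Bucket) ([]*base.File, error) {
	var all []*base.File
	var cont string
	for {
		files, next, err := bucket.ListFileNames(ctx, 1000, cont, "", "")
		if err != nil {
			return nil, err
		}
		all = append(all, files...)
		if next == "" {
			return all, nil
		}
		cont = next
	}
}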
-func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) { - uri := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, escape(name)) - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - req.Header.Set("Authorization", b.b2.authToken) - req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) - req.Header.Set("X-Blazer-Method", "b2_download_file_by_name") - b.b2.opts.addHeaders(req) - rng := mkRange(offset, size) - if rng != "" { - req.Header.Set("Range", rng) - } - logRequest(req, nil) - resp, err := makeNetRequest(ctx, req, b.b2.opts.getTransport()) - if err != nil { - return nil, err - } - logResponse(resp, nil) - if resp.StatusCode != 200 && resp.StatusCode != 206 { - defer resp.Body.Close() - return nil, mkErr(resp) - } - clen, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - resp.Body.Close() - return nil, err - } - info := make(map[string]string) - for key := range resp.Header { - if !strings.HasPrefix(key, "X-Bz-Info-") { - continue - } - name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-")) - if err != nil { - resp.Body.Close() - return nil, err - } - val, err := unescape(resp.Header.Get(key)) - if err != nil { - resp.Body.Close() - return nil, err - } - info[name] = val - } - sha1 := resp.Header.Get("X-Bz-Content-Sha1") - if sha1 == "none" && info["Large_file_sha1"] != "" { - sha1 = info["Large_file_sha1"] - } - return &FileReader{ - ReadCloser: resp.Body, - SHA1: sha1, - ID: resp.Header.Get("X-Bz-File-Id"), - ContentType: resp.Header.Get("Content-Type"), - ContentLength: int(clen), - Info: info, - }, nil -} - -// HideFile wraps b2_hide_file. -func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { - b2req := &b2types.HideFileRequest{ - BucketID: b.ID, - File: name, - } - b2resp := &b2types.HideFileResponse{} - headers := map[string]string{ - "Authorization": b.b2.authToken, - } - if err := b.b2.opts.makeRequest(ctx, "b2_hide_file", "POST", b.b2.apiURI+b2types.V1api+"b2_hide_file", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &File{ - Status: b2resp.Action, - Name: name, - Timestamp: millitime(b2resp.Timestamp), - b2: b.b2, - id: b2resp.ID, - }, nil -} - -// FileInfo holds information about a specific file. -type FileInfo struct { - Name string - SHA1 string - Size int64 - ContentType string - Info map[string]string - Status string - Timestamp time.Time -} - -// GetFileInfo wraps b2_get_file_info. -func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { - b2req := &b2types.GetFileInfoRequest{ - ID: f.id, - } - b2resp := &b2types.GetFileInfoResponse{} - headers := map[string]string{ - "Authorization": f.b2.authToken, - } - if err := f.b2.opts.makeRequest(ctx, "b2_get_file_info", "POST", f.b2.apiURI+b2types.V1api+"b2_get_file_info", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - f.Status = b2resp.Action - f.Name = b2resp.Name - f.Timestamp = millitime(b2resp.Timestamp) - f.Info = &FileInfo{ - Name: b2resp.Name, - SHA1: b2resp.SHA1, - Size: b2resp.Size, - ContentType: b2resp.ContentType, - Info: b2resp.Info, - Status: b2resp.Action, - Timestamp: millitime(b2resp.Timestamp), - } - return f.Info, nil -} - -// Key is a B2 application key. -type Key struct { - ID string - Secret string - Name string - Capabilities []string - Expires time.Time - b2 *B2 -} - -// CreateKey wraps b2_create_key. 
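Sketch of a ranged download: offset 0 with size 1<<20 becomes "Range: bytes=0-1048575" via mkRange, and a 206 reply is accepted as success.

package main

import (
	"context"
	"io/ioutil"

	"github.com/kurin/blazer/base"
)

func firstMiB(ctx context.Context, bucket *base.Bucket, name string) ([]byte, error) {
	fr, err := bucket.DownloadFileByName(ctx, name, 0, 1<<20)
	if err != nil {
		return nil, err
	}
	defer fr.Close()
	return ioutil.ReadAll(fr)
}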
-func (b *B2) CreateKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (*Key, error) { - b2req := &b2types.CreateKeyRequest{ - AccountID: b.accountID, - Capabilities: caps, - Name: name, - Valid: int(valid.Seconds()), - BucketID: bucketID, - Prefix: prefix, - } - b2resp := &b2types.CreateKeyResponse{} - headers := map[string]string{ - "Authorization": b.authToken, - } - if err := b.opts.makeRequest(ctx, "b2_create_key", "POST", b.apiURI+b2types.V1api+"b2_create_key", b2req, b2resp, headers, nil); err != nil { - return nil, err - } - return &Key{ - Name: b2resp.Name, - ID: b2resp.ID, - Secret: b2resp.Secret, - Capabilities: b2resp.Capabilities, - Expires: millitime(b2resp.Expires), - b2: b, - }, nil -} - -// Delete wraps b2_delete_key. -func (k *Key) Delete(ctx context.Context) error { - b2req := &b2types.DeleteKeyRequest{ - KeyID: k.ID, - } - headers := map[string]string{ - "Authorization": k.b2.authToken, - } - return k.b2.opts.makeRequest(ctx, "b2_delete_key", "POST", k.b2.apiURI+b2types.V1api+"b2_delete_key", b2req, nil, headers, nil) -} - -// ListKeys wraps b2_list_keys. -func (b *B2) ListKeys(ctx context.Context, max int, next string) ([]*Key, string, error) { - b2req := &b2types.ListKeysRequest{ - AccountID: b.accountID, - Max: max, - Next: next, - } - headers := map[string]string{ - "Authorization": b.authToken, - } - b2resp := &b2types.ListKeysResponse{} - if err := b.opts.makeRequest(ctx, "b2_list_keys", "POST", b.apiURI+b2types.V1api+"b2_list_keys", b2req, b2resp, headers, nil); err != nil { - return nil, "", err - } - var keys []*Key - for _, key := range b2resp.Keys { - keys = append(keys, &Key{ - Name: key.Name, - ID: key.ID, - Capabilities: key.Capabilities, - Expires: millitime(key.Expires), - b2: b, - }) - } - return keys, b2resp.Next, nil -} diff --git a/vendor/github.com/kurin/blazer/base/strings.go b/vendor/github.com/kurin/blazer/base/strings.go deleted file mode 100644 index f313666bd..000000000 --- a/vendor/github.com/kurin/blazer/base/strings.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package base - -import ( - "net/url" - "strings" -) - -func escape(s string) string { - return strings.Replace(url.QueryEscape(s), "%2F", "/", -1) -} - -func unescape(s string) (string, error) { - return url.QueryUnescape(s) -} diff --git a/vendor/github.com/kurin/blazer/internal/b2assets/b2assets.go b/vendor/github.com/kurin/blazer/internal/b2assets/b2assets.go deleted file mode 100644 index bc7bba4bd..000000000 --- a/vendor/github.com/kurin/blazer/internal/b2assets/b2assets.go +++ /dev/null @@ -1,237 +0,0 @@ -// Code generated by go-bindata. -// sources: -// data/status.html -// DO NOT EDIT! 
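Sketch of the key APIs just shown: mint a read-only application key scoped to one bucket, then page through all keys. The capability names follow the B2 documentation; everything else is placeholder.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kurin/blazer/base"
)

func auditKeys(ctx context.Context, b2 *base.B2, bucketID string) error {
	k, err := b2.CreateKey(ctx, "readonly", []string{"listFiles", "readFiles"}, 24*time.Hour, bucketID, "")
	if err != nil {
		return err
	}
	defer k.Delete(ctx) // best-effort cleanup; error ignored in this sketch
	for next := ""; ; {
		keys, n, err := b2.ListKeys(ctx, 100, next)
		if err != nil {
			return err
		}
		for _, key := range keys {
			fmt.Println(key.Name, key.Expires)
		}
		if n == "" {
			return nil
		}
		next = n
	}
}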
- -package b2assets - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _dataStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x93\x41\x6f\xe3\x20\x10\x85\xef\xf9\x15\xb3\x56\x8e\x51\x90\x73\x5c\x4d\xb8\xec\xee\x79\xa3\xaa\x52\xd5\x23\x36\xa3\x60\x09\x43\x84\x71\x9a\xc8\xe2\xbf\x57\x18\x83\xa3\xb6\x87\x5e\x7b\xf2\x98\xf7\xe6\xf1\xbe\x03\xf8\xeb\xef\xff\x3f\xcf\xaf\xa7\x7f\xa0\x7c\xaf\xf9\x06\xf3\x87\x84\xe4\x1b\x00\xf4\x9d\xd7\xc4\x9b\x03\xb4\xba\x23\xe3\x61\xf0\xc2\x8f\x03\xb2\x74\xbe\x41\x96\x9c\xd8\x58\x79\x8f\x0b\xd3\xb4\xed\xc9\x2b\x2b\x07\xf8\x7d\x84\x3c\xee\x43\x48\x9a\x1c\x9d\xf0\x9d\x35\xb3\xba\xfe\x14\xdd\x8b\x46\x53\xd4\xd2\x90\xce\x51\xd5\xbc\xb5\xa3\xf1\xd0\xdc\xa1\xb5\x92\x90\xa9\x3a\xb5\x8b\xae\x38\xc5\x65\x27\xcc\x99\x60\xb9\x3e\x66\xe4\x26\x73\x48\x74\xbb\x64\x8d\xa3\xe4\xa5\x69\x08\xc8\xbc\xcc\x52\xc9\xc9\xed\xe6\xa4\x52\x75\xc9\x5a\x43\x3a\x23\xe9\x06\x4b\xf1\x7c\x79\xf1\x7f\xcc\x26\x23\x73\x1b\x96\xeb\xac\xa7\xc8\x0a\x50\x64\x1e\x2f\xda\x0a\x39\x64\xda\x87\x6e\x46\xf4\xb4\x83\xed\x55\xe8\xd8\x6e\xff\xe2\x3a\x4f\xae\x70\xaa\x03\x9f\xa6\x64\x82\x58\x40\x1d\x3e\xc1\x75\x72\x07\xdb\x8b\xb3\xe7\x99\xee\x2a\xf4\xfe\xe4\xec\xd9\xd1\xb0\x02\x46\xb4\x36\x3a\x43\x00\xbc\x2c\x2a\x5c\x85\x1e\xe9\x58\x4d\xd3\xbc\x1d\x42\x05\xbd\xb8\x1d\xab\xba\xe2\xc8\xb2\x89\x63\xe3\x80\x7d\x05\xfd\x80\xaa\x6a\x2e\xed\x9b\xf9\x26\xe1\x13\x09\xf9\xa3\x08\x91\xa5\x17\x81\x2c\xbd\xa8\xf7\x00\x00\x00\xff\xff\xd4\xf0\x90\xb4\x69\x03\x00\x00") - -func dataStatusHtmlBytes() ([]byte, error) { - return bindataRead( - _dataStatusHtml, - "data/status.html", - ) -} - -func dataStatusHtml() (*asset, error) { - bytes, err := dataStatusHtmlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "data/status.html", size: 873, mode: os.FileMode(436), modTime: time.Unix(1520578750, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. 
-func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "data/status.html": dataStatusHtml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "data": &bintree{nil, map[string]*bintree{ - "status.html": &bintree{dataStatusHtml, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} - diff --git a/vendor/github.com/kurin/blazer/internal/b2assets/gen.go b/vendor/github.com/kurin/blazer/internal/b2assets/gen.go deleted file mode 100644 index 8c4ac4f58..000000000 --- a/vendor/github.com/kurin/blazer/internal/b2assets/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b2assets contains data required by other libraries in blazer. -package b2assets - -//go:generate go-bindata -pkg $GOPACKAGE -o b2assets.go data/ diff --git a/vendor/github.com/kurin/blazer/internal/b2types/b2types.go b/vendor/github.com/kurin/blazer/internal/b2types/b2types.go deleted file mode 100644 index bbc3476dc..000000000 --- a/vendor/github.com/kurin/blazer/internal/b2types/b2types.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2016, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b2types implements internal types common to the B2 API. -package b2types - -// You know what would be amazing? If I could autogen this from like a JSON -// file. Wouldn't that be amazing? That would be amazing. 
- -const ( - V1api = "/b2api/v1/" -) - -type ErrorMessage struct { - Status int `json:"status"` - Code string `json:"code"` - Msg string `json:"message"` -} - -type AuthorizeAccountResponse struct { - AccountID string `json:"accountId"` - AuthToken string `json:"authorizationToken"` - URI string `json:"apiUrl"` - DownloadURI string `json:"downloadUrl"` - MinPartSize int `json:"minimumPartSize"` - PartSize int `json:"recommendedPartSize"` - AbsMinPartSize int `json:"absoluteMinimumPartSize"` - Allowed Allowance `json:"allowed"` -} - -type Allowance struct { - Capabilities []string `json:"capabilities"` - Bucket string `json:"bucketId"` - Prefix string `json:"namePrefix"` -} - -type LifecycleRule struct { - DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"` - DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"` - Prefix string `json:"fileNamePrefix"` -} - -type CreateBucketRequest struct { - AccountID string `json:"accountId"` - Name string `json:"bucketName"` - Type string `json:"bucketType"` - Info map[string]string `json:"bucketInfo"` - LifecycleRules []LifecycleRule `json:"lifecycleRules"` -} - -type CreateBucketResponse struct { - BucketID string `json:"bucketId"` - Name string `json:"bucketName"` - Type string `json:"bucketType"` - Info map[string]string `json:"bucketInfo"` - LifecycleRules []LifecycleRule `json:"lifecycleRules"` - Revision int `json:"revision"` -} - -type DeleteBucketRequest struct { - AccountID string `json:"accountId"` - BucketID string `json:"bucketId"` -} - -type ListBucketsRequest struct { - AccountID string `json:"accountId"` - Bucket string `json:"bucketId,omitempty"` -} - -type ListBucketsResponse struct { - Buckets []CreateBucketResponse `json:"buckets"` -} - -type UpdateBucketRequest struct { - AccountID string `json:"accountId"` - BucketID string `json:"bucketId"` - Type string `json:"bucketType,omitempty"` - Info map[string]string `json:"bucketInfo,omitempty"` - LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` - IfRevisionIs int `json:"ifRevisionIs,omitempty"` -} - -type UpdateBucketResponse CreateBucketResponse - -type GetUploadURLRequest struct { - BucketID string `json:"bucketId"` -} - -type GetUploadURLResponse struct { - URI string `json:"uploadUrl"` - Token string `json:"authorizationToken"` -} - -type UploadFileResponse GetFileInfoResponse - -type DeleteFileVersionRequest struct { - Name string `json:"fileName"` - FileID string `json:"fileId"` -} - -type StartLargeFileRequest struct { - BucketID string `json:"bucketId"` - Name string `json:"fileName"` - ContentType string `json:"contentType"` - Info map[string]string `json:"fileInfo,omitempty"` -} - -type StartLargeFileResponse struct { - ID string `json:"fileId"` -} - -type CancelLargeFileRequest struct { - ID string `json:"fileId"` -} - -type ListPartsRequest struct { - ID string `json:"fileId"` - Start int `json:"startPartNumber"` - Count int `json:"maxPartCount"` -} - -type ListPartsResponse struct { - Next int `json:"nextPartNumber"` - Parts []struct { - ID string `json:"fileId"` - Number int `json:"partNumber"` - SHA1 string `json:"contentSha1"` - Size int64 `json:"contentLength"` - } `json:"parts"` -} - -type getUploadPartURLRequest struct { - ID string `json:"fileId"` -} - -type getUploadPartURLResponse struct { - URL string `json:"uploadUrl"` - Token string `json:"authorizationToken"` -} - -type FinishLargeFileRequest struct { - ID string `json:"fileId"` - Hashes []string `json:"partSha1Array"` -} - -type FinishLargeFileResponse struct { - 
Name string `json:"fileName"` - FileID string `json:"fileId"` - Timestamp int64 `json:"uploadTimestamp"` - Action string `json:"action"` -} - -type ListFileNamesRequest struct { - BucketID string `json:"bucketId"` - Count int `json:"maxFileCount"` - Continuation string `json:"startFileName,omitempty"` - Prefix string `json:"prefix,omitempty"` - Delimiter string `json:"delimiter,omitempty"` -} - -type ListFileNamesResponse struct { - Continuation string `json:"nextFileName"` - Files []GetFileInfoResponse `json:"files"` -} - -type ListFileVersionsRequest struct { - BucketID string `json:"bucketId"` - Count int `json:"maxFileCount"` - StartName string `json:"startFileName,omitempty"` - StartID string `json:"startFileId,omitempty"` - Prefix string `json:"prefix,omitempty"` - Delimiter string `json:"delimiter,omitempty"` -} - -type ListFileVersionsResponse struct { - NextName string `json:"nextFileName"` - NextID string `json:"nextFileId"` - Files []GetFileInfoResponse `json:"files"` -} - -type HideFileRequest struct { - BucketID string `json:"bucketId"` - File string `json:"fileName"` -} - -type HideFileResponse struct { - ID string `json:"fileId"` - Timestamp int64 `json:"uploadTimestamp"` - Action string `json:"action"` -} - -type GetFileInfoRequest struct { - ID string `json:"fileId"` -} - -type GetFileInfoResponse struct { - FileID string `json:"fileId,omitempty"` - Name string `json:"fileName,omitempty"` - AccountID string `json:"accountId,omitempty"` - BucketID string `json:"bucketId,omitempty"` - Size int64 `json:"contentLength,omitempty"` - SHA1 string `json:"contentSha1,omitempty"` - ContentType string `json:"contentType,omitempty"` - Info map[string]string `json:"fileInfo,omitempty"` - Action string `json:"action,omitempty"` - Timestamp int64 `json:"uploadTimestamp,omitempty"` -} - -type GetDownloadAuthorizationRequest struct { - BucketID string `json:"bucketId"` - Prefix string `json:"fileNamePrefix"` - Valid int `json:"validDurationInSeconds"` - ContentDisposition string `json:"b2ContentDisposition,omitempty"` -} - -type GetDownloadAuthorizationResponse struct { - BucketID string `json:"bucketId"` - Prefix string `json:"fileNamePrefix"` - Token string `json:"authorizationToken"` -} - -type ListUnfinishedLargeFilesRequest struct { - BucketID string `json:"bucketId"` - Continuation string `json:"startFileId,omitempty"` - Count int `json:"maxFileCount,omitempty"` -} - -type ListUnfinishedLargeFilesResponse struct { - Files []GetFileInfoResponse `json:"files"` - Continuation string `json:"nextFileId"` -} - -type CreateKeyRequest struct { - AccountID string `json:"accountId"` - Capabilities []string `json:"capabilities"` - Name string `json:"keyName"` - Valid int `json:"validDurationInSeconds,omitempty"` - BucketID string `json:"bucketId,omitempty"` - Prefix string `json:"namePrefix,omitempty"` -} - -type Key struct { - ID string `json:"applicationKeyId"` - Secret string `json:"applicationKey"` - AccountID string `json:"accountId"` - Capabilities []string `json:"capabilities"` - Name string `json:"keyName"` - Expires int64 `json:"expirationTimestamp"` - BucketID string `json:"bucketId"` - Prefix string `json:"namePrefix"` -} - -type CreateKeyResponse Key - -type DeleteKeyRequest struct { - KeyID string `json:"applicationKeyId"` -} - -type DeleteKeyResponse Key - -type ListKeysRequest struct { - AccountID string `json:"accountId"` - Max int `json:"maxKeyCount,omitempty"` - Next string `json:"startApplicationKeyId,omitempty"` -} - -type ListKeysResponse struct { - Keys []Key `json:"keys"` 
- Next string `json:"nextApplicationKeyId"` -} diff --git a/vendor/github.com/kurin/blazer/internal/blog/blog.go b/vendor/github.com/kurin/blazer/internal/blog/blog.go deleted file mode 100644 index 793301a26..000000000 --- a/vendor/github.com/kurin/blazer/internal/blog/blog.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package blog implements a private logger, in the manner of glog, without -// polluting the flag namespace or leaving files all over /tmp. -// -// It has almost no features, and a bunch of global state. -package blog - -import ( - "log" - "os" - "strconv" -) - -var level int32 - -type Verbose bool - -func init() { - lvl := os.Getenv("B2_LOG_LEVEL") - i, err := strconv.ParseInt(lvl, 10, 32) - if err != nil { - return - } - level = int32(i) -} - -func (v Verbose) Info(a ...interface{}) { - if v { - log.Print(a...) - } -} - -func (v Verbose) Infof(format string, a ...interface{}) { - if v { - log.Printf(format, a...) - } -} - -func V(target int32) Verbose { - return Verbose(target <= level) -} diff --git a/vendor/github.com/kurin/blazer/x/window/window.go b/vendor/github.com/kurin/blazer/x/window/window.go deleted file mode 100644 index 1f0fd26ed..000000000 --- a/vendor/github.com/kurin/blazer/x/window/window.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018, the Blazer authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package window provides a type for efficiently recording events as they -// occur over a given span of time. Events added to the window will remain -// until the time expires. -package window - -import ( - "sync" - "time" -) - -// A Window efficiently records events that have occurred over a span of time -// extending from some fixed interval ago to now. Events that pass beyond this -// horizon are discarded. -type Window struct { - mu sync.Mutex - events []interface{} - res time.Duration - last time.Time - reduce Reducer - forever bool - e interface{} -} - -// A Reducer should take two values from the window and combine them into a -// third value that will be stored in the window. The values i or j may be -// nil. The underlying types for both arguments and the output should be -// identical. -// -// If the reducer is any kind of slice or list, then data usage will grow -// linearly with the number of events added to the window. -// -// Reducer will be called on its own output: Reducer(Reducer(x, y), z). 
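The Reducer contract described above is easiest to see with a concrete reducer. A caller-side sketch that counts events over the last minute at one-second resolution (assuming this package's import path):

package main

import (
	"time"

	"github.com/kurin/blazer/x/window"
)

func main() {
	// sum treats nil slots as zero, so it is safe as a Reducer.
	sum := func(i, j interface{}) interface{} {
		n := 0
		if i != nil {
			n += i.(int)
		}
		if j != nil {
			n += j.(int)
		}
		return n
	}
	w := window.New(time.Minute, time.Second, sum)
	w.Insert(1)
	w.Insert(1)
	_ = w.Reduce().(int) // 2: events recorded within the last minute
}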
-type Reducer func(i, j interface{}) interface{} - -// New returns an initialized window for events over the given duration at the -// given resolution. Windows with tight resolution (i.e., small values for -// that argument) will be more accurate, at the cost of some memory. -// -// A size of 0 means "forever"; old events will never be removed. -func New(size, resolution time.Duration, r Reducer) *Window { - if size > 0 { - return &Window{ - res: resolution, - events: make([]interface{}, size/resolution), - reduce: r, - } - } - return &Window{ - forever: true, - reduce: r, - } -} - -func (w *Window) bucket(now time.Time) int { - nanos := now.UnixNano() - abs := nanos / int64(w.res) - return int(abs) % len(w.events) -} - -// sweep keeps the window valid. It needs to be called from every method that -// views or updates the window, and the caller needs to hold the mutex. -func (w *Window) sweep(now time.Time) { - if w.forever { - return - } - defer func() { - w.last = now - }() - - // This compares now and w.last's monotonic clocks. - diff := now.Sub(w.last) - if diff < 0 { - // time went backwards somehow; zero events and return - for i := range w.events { - w.events[i] = nil - } - return - } - last := now.Add(-diff) - - b := w.bucket(now) - p := w.bucket(last) - - if b == p && diff <= w.res { - // We're in the same bucket as the previous sweep, so all buckets are - // valid. - return - } - - if diff > w.res*time.Duration(len(w.events)) { - // We've gone longer than this window measures since the last sweep, just - // zero the thing and have done. - for i := range w.events { - w.events[i] = nil - } - return - } - - // Expire all invalid buckets. This means buckets not seen since the - // previous sweep and now, including the current bucket but not including the - // previous bucket. - old := int64(last.UnixNano()) / int64(w.res) - new := int64(now.UnixNano()) / int64(w.res) - for i := old + 1; i <= new; i++ { - b := int(i) % len(w.events) - w.events[b] = nil - } -} - -// Insert adds the given event. -func (w *Window) Insert(e interface{}) { - w.insertAt(time.Now(), e) -} - -func (w *Window) insertAt(t time.Time, e interface{}) { - w.mu.Lock() - defer w.mu.Unlock() - - if w.forever { - w.e = w.reduce(w.e, e) - return - } - - w.sweep(t) - w.events[w.bucket(t)] = w.reduce(w.events[w.bucket(t)], e) -} - -// Reduce runs the window's reducer over the valid values and returns the -// result. -func (w *Window) Reduce() interface{} { - return w.reducedAt(time.Now()) -} - -func (w *Window) reducedAt(t time.Time) interface{} { - w.mu.Lock() - defer w.mu.Unlock() - - if w.forever { - return w.e - } - - w.sweep(t) - var n interface{} - for i := range w.events { - n = w.reduce(n, w.events[i]) - } - return n -} diff --git a/vendor/github.com/marstr/guid/.travis.yml b/vendor/github.com/marstr/guid/.travis.yml deleted file mode 100644 index 35158ec53..000000000 --- a/vendor/github.com/marstr/guid/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false - -language: go - -go: - - 1.7 - - 1.8 - -install: - - go get -u github.com/golang/lint/golint - - go get -u github.com/HewlettPackard/gas - -script: - - golint --set_exit_status - - go vet - - go test -v -cover -race - - go test -bench . - - gas ./... 
\ No newline at end of file diff --git a/vendor/github.com/marstr/guid/LICENSE.txt b/vendor/github.com/marstr/guid/LICENSE.txt deleted file mode 100644 index e18a0841a..000000000 --- a/vendor/github.com/marstr/guid/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Martin Strobel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/marstr/guid/README.md b/vendor/github.com/marstr/guid/README.md deleted file mode 100644 index 355fad16d..000000000 --- a/vendor/github.com/marstr/guid/README.md +++ /dev/null @@ -1,27 +0,0 @@ -[![Build Status](https://travis-ci.org/marstr/guid.svg?branch=master)](https://travis-ci.org/marstr/guid) -[![GoDoc](https://godoc.org/github.com/marstr/guid?status.svg)](https://godoc.org/github.com/marstr/guid) -[![Go Report Card](https://goreportcard.com/badge/github.com/marstr/guid)](https://goreportcard.com/report/github.com/marstr/guid) - -# Guid -Globally unique identifiers offer a quick means of generating non-colliding values across a distributed system. For this implementation, [RFC 4122](http://ietf.org/rfc/rfc4122.txt) governs the desired behavior. - -## What's in a name? -You have likely already noticed that the RFC and some implementations refer to these structures as UUIDs (Universally Unique Identifiers), whereas this project is annotated as GUIDs (Globally Unique Identifiers). The name Guid was selected to make clear this project's ties to the [.NET struct Guid.](https://msdn.microsoft.com/en-us/library/system.guid(v=vs.110).aspx) The most obvious relationship is the desire to have the same format specifiers available in this library's Format and Parse methods as .NET would have in its ToString and Parse methods. - -# Installation -- Ensure you have the [Go Programming Language](https://golang.org/) installed on your system. -- Run the command: `go get -u github.com/marstr/guid` - -# Contribution -Contributions are welcome! Feel free to send Pull Requests. Continuous Integration will ensure that you have conformed to Go conventions. Please remember to add tests for your changes. - -# Versioning -This library will adhere to the -[Semantic Versioning 2.0.0](http://semver.org/spec/v2.0.0.html) specification. It may be worth noting this should allow for tools like [glide](https://glide.readthedocs.io/en/latest/) to pull in this library with ease. - -The Release Notes portion of this file will be updated to reflect the most recent major/minor updates, with the option to tag particular bug-fixes as well. 
Updates to the Release Notes for patches should be additive, whereas major/minor updates should replace the previous version. If one desires to see the release notes for an older version, check out that version of the code and open this file. - -# Release Notes 1.1.* - -## v1.1.0 -Adding support for JSON marshaling and unmarshaling. diff --git a/vendor/github.com/marstr/guid/guid.go b/vendor/github.com/marstr/guid/guid.go deleted file mode 100644 index 51b038b75..000000000 --- a/vendor/github.com/marstr/guid/guid.go +++ /dev/null @@ -1,301 +0,0 @@ -package guid - -import ( - "bytes" - "crypto/rand" - "errors" - "fmt" - "net" - "strings" - "sync" - "time" -) - -// GUID is a unique identifier designed to virtually guarantee non-conflict between values generated -// across a distributed system. -type GUID struct { - timeHighAndVersion uint16 - timeMid uint16 - timeLow uint32 - clockSeqHighAndReserved uint8 - clockSeqLow uint8 - node [6]byte -} - -// Format enumerates the values that are supported by Parse and Format -type Format string - -// These constants define the possible string formats available via this implementation of Guid. -const ( - FormatB Format = "B" // {00000000-0000-0000-0000-000000000000} - FormatD Format = "D" // 00000000-0000-0000-0000-000000000000 - FormatN Format = "N" // 00000000000000000000000000000000 - FormatP Format = "P" // (00000000-0000-0000-0000-000000000000) - FormatX Format = "X" // {0x00000000,0x0000,0x0000,{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}} - FormatDefault Format = FormatD -) - -// CreationStrategy enumerates the values that are supported for populating the bits of a new Guid. -type CreationStrategy string - -// These constants define the possible creation strategies available via this implementation of Guid. -const ( - CreationStrategyVersion1 CreationStrategy = "version1" - CreationStrategyVersion2 CreationStrategy = "version2" - CreationStrategyVersion3 CreationStrategy = "version3" - CreationStrategyVersion4 CreationStrategy = "version4" - CreationStrategyVersion5 CreationStrategy = "version5" -) - -var emptyGUID GUID - -// NewGUID generates and returns a new globally unique identifier -func NewGUID() GUID { - result, err := version4() - if err != nil { - panic(err) //Version 4 (pseudo-random GUID) doesn't use anything that could fail. - } - return result } - -var knownStrategies = map[CreationStrategy]func() (GUID, error){ - CreationStrategyVersion1: version1, - CreationStrategyVersion4: version4, -} - -// NewGUIDs generates and returns a new globally unique identifier that conforms to the given strategy. -func NewGUIDs(strategy CreationStrategy) (GUID, error) { - if creator, present := knownStrategies[strategy]; present { - result, err := creator() - return result, err - } - return emptyGUID, errors.New("Unsupported CreationStrategy") -} - -// Empty returns a copy of the default and empty GUID. -func Empty() GUID { - return emptyGUID -} - -var knownFormats = map[Format]string{ - FormatN: "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x", - FormatD: "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", - FormatB: "{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}", - FormatP: "(%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x)", - FormatX: "{0x%08x,0x%04x,0x%04x,{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x}}", -} - -// MarshalJSON writes a GUID as a JSON string. 
-func (guid GUID) MarshalJSON() (marshaled []byte, err error) { - buf := bytes.Buffer{} - - _, err = buf.WriteRune('"') - buf.WriteString(guid.String()) - buf.WriteRune('"') - - marshaled = buf.Bytes() - return -} - -// Parse instantiates a GUID from a text representation of the same GUID. -// This is the inverse of function family String() -func Parse(value string) (GUID, error) { - var guid GUID - for _, fullFormat := range knownFormats { - parity, err := fmt.Sscanf( - value, - fullFormat, - &guid.timeLow, - &guid.timeMid, - &guid.timeHighAndVersion, - &guid.clockSeqHighAndReserved, - &guid.clockSeqLow, - &guid.node[0], - &guid.node[1], - &guid.node[2], - &guid.node[3], - &guid.node[4], - &guid.node[5]) - if parity == 11 && err == nil { - return guid, err - } - } - return emptyGUID, fmt.Errorf("\"%s\" is not in a recognized format", value) -} - -// String returns a text representation of a GUID in the default format. -func (guid GUID) String() string { - return guid.Stringf(FormatDefault) -} - -// Stringf returns a text representation of a GUID that conforms to the specified format. -// If an unrecognized format is provided, the empty string is returned. -func (guid GUID) Stringf(format Format) string { - if format == "" { - format = FormatDefault - } - fullFormat, present := knownFormats[format] - if !present { - return "" - } - return fmt.Sprintf( - fullFormat, - guid.timeLow, - guid.timeMid, - guid.timeHighAndVersion, - guid.clockSeqHighAndReserved, - guid.clockSeqLow, - guid.node[0], - guid.node[1], - guid.node[2], - guid.node[3], - guid.node[4], - guid.node[5]) -} - -// UnmarshalJSON parses a GUID from a JSON string token. -func (guid *GUID) UnmarshalJSON(marshaled []byte) (err error) { - if len(marshaled) < 2 { - err = errors.New("JSON GUID must be surrounded by quotes") - return - } - stripped := marshaled[1 : len(marshaled)-1] - *guid, err = Parse(string(stripped)) - return -} - -// Version reads a GUID to parse which mechanism of generating GUIDS was employed. -// Values returned here are documented in rfc4122.txt. 
-func (guid GUID) Version() uint { - return uint(guid.timeHighAndVersion >> 12) -} - -var unixToGregorianOffset = time.Date(1970, 01, 01, 0, 0, 00, 0, time.UTC).Sub(time.Date(1582, 10, 15, 0, 0, 0, 0, time.UTC)) - -// getRFC4122Time returns a 60-bit count of 100-nanosecond intervals since 00:00:00.00 October 15th, 1582 -func getRFC4122Time() int64 { - currentTime := time.Now().UTC().Add(unixToGregorianOffset).UnixNano() - currentTime /= 100 - return currentTime & 0x0FFFFFFFFFFFFFFF -} - -var clockSeqVal uint16 -var clockSeqKey sync.Mutex - -func getClockSequence() (uint16, error) { - clockSeqKey.Lock() - defer clockSeqKey.Unlock() - - if 0 == clockSeqVal { - var temp [2]byte - if parity, err := rand.Read(temp[:]); !(2 == parity && nil == err) { - return 0, err - } - clockSeqVal = uint16(temp[0])<<8 | uint16(temp[1]) - } - clockSeqVal++ - return clockSeqVal, nil -} - -func getMACAddress() (mac [6]byte, err error) { - var hostNICs []net.Interface - - hostNICs, err = net.Interfaces() - if err != nil { - return - } - - for _, nic := range hostNICs { - var parity int - - parity, err = fmt.Sscanf( - strings.ToLower(nic.HardwareAddr.String()), - "%02x:%02x:%02x:%02x:%02x:%02x", - &mac[0], - &mac[1], - &mac[2], - &mac[3], - &mac[4], - &mac[5]) - - if parity == len(mac) { - return - } - } - - err = fmt.Errorf("No suitable address found") - - return -} - -func version1() (result GUID, err error) { - var localMAC [6]byte - var clockSeq uint16 - - currentTime := getRFC4122Time() - - result.timeLow = uint32(currentTime) - result.timeMid = uint16(currentTime >> 32) - result.timeHighAndVersion = uint16(currentTime >> 48) - if err = result.setVersion(1); err != nil { - return emptyGUID, err - } - - if localMAC, err = getMACAddress(); nil != err { - if parity, err := rand.Read(localMAC[:]); !(len(localMAC) != parity && err == nil) { - return emptyGUID, err - } - localMAC[0] |= 0x1 - } - copy(result.node[:], localMAC[:]) - - if clockSeq, err = getClockSequence(); nil != err { - return emptyGUID, err - } - - result.clockSeqLow = uint8(clockSeq) - result.clockSeqHighAndReserved = uint8(clockSeq >> 8) - - result.setReservedBits() - - return -} - -func version4() (GUID, error) { - var retval GUID - var bits [10]byte - - if parity, err := rand.Read(bits[:]); !(len(bits) == parity && err == nil) { - return emptyGUID, err - } - retval.timeHighAndVersion |= uint16(bits[0]) | uint16(bits[1])<<8 - retval.timeMid |= uint16(bits[2]) | uint16(bits[3])<<8 - retval.timeLow |= uint32(bits[4]) | uint32(bits[5])<<8 | uint32(bits[6])<<16 | uint32(bits[7])<<24 - retval.clockSeqHighAndReserved = uint8(bits[8]) - retval.clockSeqLow = uint8(bits[9]) - - //Randomly set clock-sequence, reserved, and node - if written, err := rand.Read(retval.node[:]); !(nil == err && written == len(retval.node)) { - retval = emptyGUID - return retval, err - } - - if err := retval.setVersion(4); nil != err { - return emptyGUID, err - } - retval.setReservedBits() - - return retval, nil -} - -func (guid *GUID) setVersion(version uint16) error { - if version > 5 || version == 0 { - return fmt.Errorf("While setting GUID version, unsupported version: %d", version) - } - guid.timeHighAndVersion = (guid.timeHighAndVersion & 0x0fff) | version<<12 - return nil -} - -func (guid *GUID) setReservedBits() { - guid.clockSeqHighAndReserved = (guid.clockSeqHighAndReserved & 0x3f) | 0x80 -} diff --git a/vendor/github.com/minio/minio-go/v6/.gitignore b/vendor/github.com/minio/minio-go/v6/.gitignore deleted file mode 100644 index fa967abd7..000000000 --- 
a/vendor/github.com/minio/minio-go/v6/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*~ -*.test -validator diff --git a/vendor/github.com/minio/minio-go/v6/.travis.yml b/vendor/github.com/minio/minio-go/v6/.travis.yml deleted file mode 100644 index e50cefee7..000000000 --- a/vendor/github.com/minio/minio-go/v6/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -sudo: false -language: go - -os: -- linux - -env: -- ARCH=x86_64 - -go: -- 1.13.x -- tip - -matrix: - fast_finish: true - allow_failures: - - go: tip - -before_install: - - sudo apt-get install devscripts - - mkdir /tmp/minio - - (cd /tmp/minio; GO111MODULE=on go get github.com/minio/minio) - - sudo cp testcerts/public.crt /usr/local/share/ca-certificates/ - - sudo update-ca-certificates - - MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 ${GOPATH}/bin/minio server --compat --quiet --certs-dir testcerts data 2>&1 > minio.log & - -script: - - diff -au <(gofmt -d .) <(printf "") - - diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "") - - make diff --git a/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md deleted file mode 100644 index 8b1ee86c6..000000000 --- a/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ - -### Developer Guidelines - -``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: - -* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. - - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create a new Pull Request - -* When you're ready to create a pull request, be sure to: - - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - - Run `go fmt` - - Squash your commits into a single commit. `git rebase -i`. It's okay to force-update your pull request. - - Make sure `go test -race ./...` and `go build` complete. - NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the credentials as the environment variables - ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...`` - -* Read the [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project - - the `minio-go` project is strictly conformant with Golang style - - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v6/LICENSE b/vendor/github.com/minio/minio-go/v6/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/minio/minio-go/v6/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
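As an aside, the `marstr/guid` package deleted a few files up is small enough to exercise with only the API visible in its source. A minimal sketch, assuming the package is still fetchable from its upstream import path `github.com/marstr/guid`; the choice of format specifier is purely illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/marstr/guid"
)

func main() {
	// NewGUID uses creation strategy version 4 (random bits).
	g := guid.NewGUID()

	// The format specifiers mirror .NET's Guid.ToString:
	// FormatB wraps the canonical form in braces.
	fmt.Println(g.Stringf(guid.FormatB)) // e.g. {xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx}

	// Parse tries every known format and round-trips String().
	parsed, err := guid.Parse(g.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Version()) // 4
}
```
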
diff --git a/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md deleted file mode 100644 index f640dfb9f..000000000 --- a/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md +++ /dev/null @@ -1,35 +0,0 @@ -# For maintainers only - -## Responsibilities - -Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) - -### Making new releases -Tag and sign your release commit; additionally, this step requires you to have access to MinIO's trusted private key. -```sh -$ export GNUPGHOME=/media/${USER}/minio/trusted -$ git tag -s 4.0.0 -$ git push -$ git push --tags -``` - -### Update version -Once the release has been made, update the `libraryVersion` constant in `api.go` to the next version to be released. - -```sh -$ grep libraryVersion api.go - libraryVersion = "4.0.1" -``` - -Commit your changes -``` -$ git commit -a -m "Update version for next release" --author "MinIO Trusted " -``` - -### Announce -Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release. - -To generate the `changelog` -```sh -$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. -``` diff --git a/vendor/github.com/minio/minio-go/v6/Makefile b/vendor/github.com/minio/minio-go/v6/Makefile deleted file mode 100644 index 18a74665c..000000000 --- a/vendor/github.com/minio/minio-go/v6/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -all: checks - -.PHONY: examples docs - -checks: vet test examples functional-test - -vet: - @GO111MODULE=on go vet ./... - -test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... - -examples: - @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done - -functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go - -clean: - @echo "Cleaning up all the generated files" - @find . -name '*.test' | xargs rm -fv - @find . -name '*~' | xargs rm -fv diff --git a/vendor/github.com/minio/minio-go/v6/NOTICE b/vendor/github.com/minio/minio-go/v6/NOTICE deleted file mode 100644 index fc6c8808e..000000000 --- a/vendor/github.com/minio/minio-go/v6/NOTICE +++ /dev/null @@ -1,2 +0,0 @@ -minio-go -Copyright 2015-2017 MinIO, Inc. \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v6/README.md b/vendor/github.com/minio/minio-go/v6/README.md deleted file mode 100644 index 810a93941..000000000 --- a/vendor/github.com/minio/minio-go/v6/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) - -The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. 
- -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). - -This document assumes that you have a working [Go development environment](https://golang.org/doc/install). - -## Download from Github -```sh -GO111MODULE=on go get github.com/minio/minio-go/v6 -``` - -## Initialize MinIO Client -MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. - -| Parameter | Description| -| :--- | :--- | -| endpoint | URL to object storage service. | -| accessKeyID | Access key is the user ID that uniquely identifies your account. | -| secretAccessKey | Secret key is the password to your account. | -| secure | Set this value to 'true' to enable secure (HTTPS) access. | - - -```go -package main - -import ( - "github.com/minio/minio-go/v6" - "log" -) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } - - log.Printf("%#v\n", minioClient) // minioClient is now setup -} -``` - -## Quick Start Example - File Uploader -This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. - -We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. - -### FileUploader.go -```go -package main - -import ( - "github.com/minio/minio-go/v6" - "log" -) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } - - // Make a new bucket called mymusic. - bucketName := "mymusic" - location := "us-east-1" - - err = minioClient.MakeBucket(bucketName, location) - if err != nil { - // Check to see if we already own this bucket (which happens if you run this twice) - exists, errBucketExists := minioClient.BucketExists(bucketName) - if errBucketExists == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } else { - log.Printf("Successfully created %s\n", bucketName) - } - - // Upload the zip file - objectName := "golden-oldies.zip" - filePath := "/tmp/golden-oldies.zip" - contentType := "application/zip" - - // Upload the zip file with FPutObject - n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, n) -} -``` - -### Run FileUploader -```sh -go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic -2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 - -mc ls play/mymusic/ -[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip -``` - -## API Reference -The full API Reference is available here. 
- -* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) - -### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) - -### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) - -### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) - -### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) -* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext) -* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext) - -### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext) -* [`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) - - -### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) - -### API Reference : Client custom settings -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* 
[`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) - -## Full Examples - -### Full Examples : Bucket Operations -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### Full Examples : Bucket policy Operations -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### Full Examples : Bucket lifecycle Operations -* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) -* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) - -### Full Examples : Bucket notification Operations -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) - -### Full Examples : File Object Operations -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) - -### Full Examples : Object Operations -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### Full 
Examples : Encrypted Object Operations -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### Full Examples : Presigned Operations -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) - -## Contribute -[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) - -[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) -[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) - -## License -This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v6/README_zh_CN.md b/vendor/github.com/minio/minio-go/v6/README_zh_CN.md deleted file mode 100644 index b5ba0c1d0..000000000 --- a/vendor/github.com/minio/minio-go/v6/README_zh_CN.md +++ /dev/null @@ -1,244 +0,0 @@ -# MinIO Go SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) - -The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service. - -**Supported cloud storage:** - -- AWS Signature Version 4 - - Amazon S3 - - MinIO - -- AWS Signature Version 2 - - Google Cloud Storage (compatibility mode) - - Openstack Swift + Swift3 middleware - - Ceph Object Gateway - - Riak CS - -In this document we will learn how to install the MinIO client SDK, connect to MinIO, and walk through a file upload example. For the complete API and examples, please refer to the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). - -This document assumes that you already have a [Go development environment](https://golang.org/doc/install). - -## Download from Github -```sh -go get -u github.com/minio/minio-go -``` - -## Initialize MinIO Client -The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage. - -| Parameter | Description| -| :--- | :--- | -| endpoint | URL of the object storage service | -| accessKeyID | Access key is the user ID that uniquely identifies your account. | -| secretAccessKey | Secret key is the password to your account. | -| secure | true means HTTPS is used | - - -```go -package main - -import ( - "github.com/minio/minio-go/v6" - "log" ) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize the minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } - - log.Printf("%#v\n", minioClient) // minioClient initialized successfully -} -``` - -## Example - File Upload -This example connects to an object storage service, creates a bucket, and uploads a file to the bucket. - -In this example we use the MinIO server running at [https://play.min.io](https://play.min.io); you can use this service for development and testing. The access credentials in this example are public. - -### FileUploader.go -```go -package main - -import ( - "github.com/minio/minio-go/v6" - "log" -) - -func 
main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize the minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } - - // Make a bucket called mymusic. - bucketName := "mymusic" - location := "us-east-1" - - err = minioClient.MakeBucket(bucketName, location) - if err != nil { - // Check if the bucket already exists. - exists, err := minioClient.BucketExists(bucketName) - if err == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } - log.Printf("Successfully created %s\n", bucketName) - - // Upload a zip file. - objectName := "golden-oldies.zip" - filePath := "/tmp/golden-oldies.zip" - contentType := "application/zip" - - // Upload the zip file with FPutObject. - n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, n) -} -``` - -### Run FileUploader -```sh -go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic -2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 - -mc ls play/mymusic/ -[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip -``` - -## API Documentation -The complete API documentation is available here. -* [Complete API Documentation](https://docs.min.io/docs/golang-client-api-reference) - -### API Documentation : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) - -### API Documentation : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) - -### API Documentation : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) - -### API Documentation : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext) -* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext) - -### API Documentation : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext) -* 
[`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) - -### API Documentation : Encrypted Object Operations -* [`GetEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#GetEncryptedObject) -* [`PutEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#PutEncryptedObject) - -### API Documentation : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) - -### API Documentation : Client custom settings -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) - -## Full Examples - -### Full Examples : Bucket Operations -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### Full Examples : Bucket policy Operations -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### Full Examples : Bucket notification Operations -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) - -### Full Examples : File Object Operations -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* 
[fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) - -### Full Examples : Object Operations -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### Full Examples : Encrypted Object Operations -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### Full Examples : Presigned Operations -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) - -## Contribute -[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) - -[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) -[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) - diff --git a/vendor/github.com/minio/minio-go/v6/api-compose-object.go b/vendor/github.com/minio/minio-go/v6/api-compose-object.go deleted file mode 100644 index 748b558ce..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-compose-object.go +++ /dev/null @@ -1,565 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/encrypt" - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// DestinationInfo - type with information about the object to be -// created via server-side copy requests, using the Compose API. -type DestinationInfo struct { - bucket, object string - encryption encrypt.ServerSide - - // if no user-metadata is provided, it is copied from source - // (when there is only one source object in the compose - // request) - userMetadata map[string]string -} - -// NewDestinationInfo - creates a compose-object/copy-source -// destination info object. -// -// `sse` is the key info for server-side-encryption with customer -// provided key. If it is nil, no encryption is performed. -// -// `userMeta` is the user-metadata key-value pairs to be set on the -// destination. The keys are automatically prefixed with `x-amz-meta-` -// if needed. If nil is passed, and if only a single source (of any -// size) is provided in the ComposeObject call, then metadata from the -// source is copied to the destination. -func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucket); err != nil { - return d, err - } - if err = s3utils.CheckValidObjectName(object); err != nil { - return d, err - } - - // Process custom-metadata to remove a `x-amz-meta-` prefix if - // present and validate that keys are distinct (after this - // prefix removal). - m := make(map[string]string) - for k, v := range userMeta { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - k = k[len("x-amz-meta-"):] - } - if _, ok := m[k]; ok { - return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)) - } - m[k] = v - } - - return DestinationInfo{ - bucket: bucket, - object: object, - encryption: sse, - userMetadata: m, - }, nil -} - -// getUserMetaHeadersMap - construct appropriate key-value pairs to send -// as headers from metadata map to pass into copy-object request. For -// single part copy-object (i.e. non-multipart object), enable the -// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to -// `REPLACE`, so that metadata headers from the source are not copied -// over. -func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string { - if len(d.userMetadata) == 0 { - return nil - } - r := make(map[string]string) - if withCopyDirectiveHeader { - r["x-amz-metadata-directive"] = "REPLACE" - } - for k, v := range d.userMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - r[k] = v - } else { - r["x-amz-meta-"+k] = v - } - } - return r -} - -// SourceInfo - represents a source object to be copied, using -// server-side copying APIs. -type SourceInfo struct { - bucket, object string - start, end int64 - encryption encrypt.ServerSide - // Headers to send with the upload-part-copy request involving - // this source object. - Headers http.Header -} - -// NewSourceInfo - create a compose-object/copy-object source info -// object. -// -// `sse` is the decryption key for server-side-encryption -// with customer provided key. It may be nil if the source is not -// encrypted. 
-func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { - r := SourceInfo{ - bucket: bucket, - object: object, - start: -1, // range is unspecified by default - encryption: sse, - Headers: make(http.Header), - } - - // Set the source header - r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) - return r -} - -// SetRange - Set the start and end offset of the source object to be -// copied. If this method is not called, the whole source object is -// copied. -func (s *SourceInfo) SetRange(start, end int64) error { - if start > end || start < 0 { - return ErrInvalidArgument("start must be non-negative, and start must be at most end.") - } - // Note that 0 <= start <= end - s.start, s.end = start, end - return nil -} - -// SetMatchETagCond - Set ETag match condition. The object is copied -// only if the etag of the source matches the value given here. -func (s *SourceInfo) SetMatchETagCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - s.Headers.Set("x-amz-copy-source-if-match", etag) - return nil -} - -// SetMatchETagExceptCond - Set the ETag match exception -// condition. The object is copied only if the etag of the source is -// not the value given here. -func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - s.Headers.Set("x-amz-copy-source-if-none-match", etag) - return nil -} - -// SetModifiedSinceCond - Set the modified since condition. -func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") - } - s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetUnmodifiedSinceCond - Set the unmodified since condition. -func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") - } - s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) - return nil -} - -// Helper to fetch size and etag of an object using a StatObject call. -func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { - // Get object info - need size and etag here. Also, decryption - // headers are added to the stat request if given. - var objInfo ObjectInfo - opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} - objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) - if err != nil { - err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) - } else { - size = objInfo.Size - etag = objInfo.ETag - userMeta = make(map[string]string) - for k, v := range objInfo.Metadata { - if strings.HasPrefix(k, "x-amz-meta-") { - if len(v) > 0 { - userMeta[k] = v[0] - } - } - } - } - return -} - -// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, - metadata map[string]string) (ObjectInfo, error) { - - // Build headers. - headers := make(http.Header) - - // Set all the metadata headers. 
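-	// These headers are typically produced by getUserMetaHeadersMap
-	// above; when they include `x-amz-metadata-directive: REPLACE`,
-	// they override the source object's metadata on the copy.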
- for k, v := range metadata { - headers.Set(k, v) - } - - // Set the source header - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) - } - - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return ObjectInfo{}, err - } - - objInfo := ObjectInfo{ - Key: destObject, - ETag: strings.Trim(cpObjRes.ETag, "\""), - LastModified: cpObjRes.LastModified, - } - return objInfo, nil -} - -func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { - - headers := make(http.Header) - - // Set source - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - if startOffset < 0 { - return p, ErrInvalidArgument("startOffset must be non-negative") - } - - if length >= 0 { - headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) - } - - for k, v := range metadata { - headers.Set(k, v) - } - - queryValues := make(url.Values) - queryValues.Set("partNumber", strconv.Itoa(partID)) - queryValues.Set("uploadId", uploadID) - - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - queryValues: queryValues, - }) - defer closeResponse(resp) - if err != nil { - return - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, destBucket, destObject) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partID, cpObjRes.ETag - return p, nil -} - -// uploadPartCopy - helper function to create a part in a multipart -// upload via an upload-part-copy request -// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, - headers http.Header) (p CompletePart, err error) { - - // Build query parameters - urlValues := make(url.Values) - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - urlValues.Set("uploadId", uploadID) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: bucket, - objectName: object, - customHeader: headers, - queryValues: urlValues, - }) - defer closeResponse(resp) - if err != nil { - return p, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, bucket, object) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partNumber, cpObjRes.ETag - return p, nil -} - -// ComposeObjectWithProgress - creates an object using server-side copying of -// existing objects. 
It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. Optionally takes progress reader hook -// for applications to look at current progress. -func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error { - if len(srcs) < 1 || len(srcs) > maxPartsCount { - return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") - } - ctx := context.Background() - srcSizes := make([]int64, len(srcs)) - var totalSize, size, totalParts int64 - var srcUserMeta map[string]string - etags := make([]string, len(srcs)) - var err error - for i, src := range srcs { - size, etags[i], srcUserMeta, err = src.getProps(c) - if err != nil { - return err - } - - // Error out if client side encryption is used in this source object when - // more than one source objects are given. - if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { - return ErrInvalidArgument( - fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) - } - - // Check if a segment is specified, and if so, is the - // segment within object bounds? - if src.start != -1 { - // Since range is specified, - // 0 <= src.start <= src.end - // so only invalid case to check is: - if src.end >= size { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", - i, src.start, src.end, size)) - } - size = src.end - src.start + 1 - } - - // Only the last source may be less than `absMinPartSize` - if size < absMinPartSize && i < len(srcs)-1 { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) - } - - // Is data to copy too large? - totalSize += size - if totalSize > maxMultipartPutObjectSize { - return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) - } - - // record source size - srcSizes[i] = size - - // calculate parts needed for current source - totalParts += partsRequired(size) - // Do we need more parts than we are allowed? - if totalParts > maxPartsCount { - return ErrInvalidArgument(fmt.Sprintf( - "Your proposed compose object requires more than %d parts", maxPartsCount)) - } - } - - // Single source object case (i.e. when only one source is - // involved, it is being copied wholly and at most 5GiB in - // size, emptyfiles are also supported). - if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { - return c.CopyObjectWithProgress(dst, srcs[0], progress) - } - - // Now, handle multipart-copy cases. - - // 1. Ensure that the object has not been changed while - // we are copying data. - for i, src := range srcs { - if src.Headers.Get("x-amz-copy-source-if-match") == "" { - src.SetMatchETagCond(etags[i]) - } - } - - // 2. Initiate a new multipart upload. - - // Set user-metadata on the destination object. If no - // user-metadata is specified, and there is only one source, - // (only) then metadata from source is copied. - userMeta := dst.getUserMetaHeadersMap(false) - metaMap := userMeta - if len(userMeta) == 0 && len(srcs) == 1 { - metaMap = srcUserMeta - } - metaHeaders := make(map[string]string) - for k, v := range metaMap { - metaHeaders[k] = v - } - - uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) - if err != nil { - return err - } - - // 3. 
Perform copy part uploads - objParts := []CompletePart{} - partIndex := 1 - for i, src := range srcs { - h := src.Headers - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(h) - } - // Add destination encryption headers - if dst.encryption != nil { - dst.encryption.Marshal(h) - } - - // calculate start/end indices of parts after - // splitting. - startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) - for j, start := range startIdx { - end := endIdx[j] - - // Add (or reset) source range header for - // upload part copy request. - h.Set("x-amz-copy-source-range", - fmt.Sprintf("bytes=%d-%d", start, end)) - - // make upload-part-copy request - complPart, err := c.uploadPartCopy(ctx, dst.bucket, - dst.object, uploadID, partIndex, h) - if err != nil { - return err - } - if progress != nil { - io.CopyN(ioutil.Discard, progress, end-start+1) - } - objParts = append(objParts, complPart) - partIndex++ - } - } - - // 4. Make final complete-multipart request. - _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, - completeMultipartUpload{Parts: objParts}) - if err != nil { - return err - } - return nil -} - -// ComposeObject - creates an object using server-side copying of -// existing objects. It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. -func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { - return c.ComposeObjectWithProgress(dst, srcs, nil) -} - -// partsRequired is maximum parts possible with -// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) -func partsRequired(size int64) int64 { - maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) - r := size / int64(maxPartSize) - if size%int64(maxPartSize) > 0 { - r++ - } - return r -} - -// calculateEvenSplits - computes splits for a source and returns -// start and end index slices. Splits happen evenly to be sure that no -// part is less than 5MiB, as that could fail the multipart request if -// it is not the last part. -func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { - if size == 0 { - return - } - - reqParts := partsRequired(size) - startIndex = make([]int64, reqParts) - endIndex = make([]int64, reqParts) - // Compute number of required parts `k`, as: - // - // k = ceiling(size / copyPartSize) - // - // Now, distribute the `size` bytes in the source into - // k parts as evenly as possible: - // - // r parts sized (q+1) bytes, and - // (k - r) parts sized q bytes, where - // - // size = q * k + r (by simple division of size by k, - // so that 0 <= r < k) - // - start := src.start - if start == -1 { - start = 0 - } - quot, rem := size/reqParts, size%reqParts - nextStart := start - for j := int64(0); j < reqParts; j++ { - curPartSize := quot - if j < rem { - curPartSize++ - } - - cStart := nextStart - cEnd := cStart + curPartSize - 1 - nextStart = cEnd + 1 - - startIndex[j], endIndex[j] = cStart, cEnd - } - return -} diff --git a/vendor/github.com/minio/minio-go/v6/api-datatypes.go b/vendor/github.com/minio/minio-go/v6/api-datatypes.go deleted file mode 100644 index 4e54f3415..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-datatypes.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "io" - "net/http" - "time" -) - -// BucketInfo container for bucket metadata. -type BucketInfo struct { - // The name of the bucket. - Name string `json:"name"` - // Date the bucket was created. - CreationDate time.Time `json:"creationDate"` -} - -// StringMap represents map with custom UnmarshalXML -type StringMap map[string]string - -// UnmarshalXML unmarshals the XML into a map of string to strings, -// creating a key in the map for each tag and setting it's value to the -// tags contents. -// -// The fact this function is on the pointer of Map is important, so that -// if m is nil it can be initialized, which is often the case if m is -// nested in another xml structurel. This is also why the first thing done -// on the first line is initialize it. -func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` - } - for { - var e xmlMapEntry - err := d.Decode(&e) - if err == io.EOF { - break - } else if err != nil { - return err - } - (*m)[e.XMLName.Local] = e.Value - } - return nil -} - -// ObjectInfo container for object metadata. -type ObjectInfo struct { - // An ETag is optionally set to md5sum of an object. In case of multipart objects, - // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of - // each parts concatenated into one string. - ETag string `json:"etag"` - - Key string `json:"name"` // Name of the object - LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. - Size int64 `json:"size"` // Size in bytes of the object. - ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. - Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. - - // Collection of additional metadata on the object. - // eg: x-amz-meta-*, content-encoding etc. - Metadata http.Header `json:"metadata" xml:"-"` - - // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` - - // Owner name. - Owner struct { - DisplayName string `json:"name"` - ID string `json:"id"` - } `json:"owner"` - - // The class of storage used to store the object. - StorageClass string `json:"storageClass"` - - // Error - Err error `json:"-"` -} - -// ObjectMultipartInfo container for multipart object metadata. -type ObjectMultipartInfo struct { - // Date and time at which the multipart upload was initiated. - Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Initiator initiator - Owner owner - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass string - - // Key of the object for which the multipart upload was initiated. - Key string - - // Size in bytes of the object. 
- Size int64 - - // Upload ID that identifies the multipart upload. - UploadID string `xml:"UploadId"` - - // Error - Err error -} diff --git a/vendor/github.com/minio/minio-go/v6/api-error-response.go b/vendor/github.com/minio/minio-go/v6/api-error-response.go deleted file mode 100644 index 726fdd274..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-error-response.go +++ /dev/null @@ -1,282 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -/* **** SAMPLE ERROR RESPONSE **** - - - AccessDenied - Access Denied - bucketName - objectName - F19772218238A85A - GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD - -*/ - -// ErrorResponse - Is the typed error returned by all API operations. -// ErrorResponse struct should be comparable since it is compared inside -// golang http API (https://github.com/golang/go/issues/29768) -type ErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - BucketName string - Key string - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - - // Region where the bucket is located. This header is returned - // only in HEAD bucket and ListObjects response. - Region string - - // Underlying HTTP status code for the returned error - StatusCode int `xml:"-" json:"-"` -} - -// ToErrorResponse - Returns parsed ErrorResponse struct from body and -// http headers. -// -// For example: -// -// import s3 "github.com/minio/minio-go/v6" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... -func ToErrorResponse(err error) ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return err - default: - return ErrorResponse{} - } -} - -// Error - Returns S3 error string. -func (e ErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// Common string for errors to report issue location in unexpected -// cases. -const ( - reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." -) - -// httpRespToErrorResponse returns a new encoded ErrorResponse -// structure as error. -func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { - if resp == nil { - msg := "Response is empty. " + reportIssue - return ErrInvalidArgument(msg) - } - - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - } - - err := xmlDecoder(resp.Body, &errResp) - // Xml decoding failed with no body, fall back to HTTP headers. 
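-	// The switch below synthesizes the error codes S3 is known to use
-	// for each status: NoSuchBucket/NoSuchKey for 404, AccessDenied
-	// for 403, Conflict for 409, PreconditionFailed for 412, and the
-	// raw HTTP status text for anything else.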
- if err != nil { - switch resp.StatusCode { - case http.StatusNotFound: - if objectName == "" { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - BucketName: bucketName, - } - } else { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchKey", - Message: "The specified key does not exist.", - BucketName: bucketName, - Key: objectName, - } - } - case http.StatusForbidden: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "AccessDenied", - Message: "Access Denied.", - BucketName: bucketName, - Key: objectName, - } - case http.StatusConflict: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "Conflict", - Message: "Bucket not empty.", - BucketName: bucketName, - } - case http.StatusPreconditionFailed: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "PreconditionFailed", - Message: s3ErrorResponseMap["PreconditionFailed"], - BucketName: bucketName, - Key: objectName, - } - default: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: resp.Status, - Message: resp.Status, - BucketName: bucketName, - } - } - } - - // Save hostID, requestID and region information - // from headers if not available through error XML. - if errResp.RequestID == "" { - errResp.RequestID = resp.Header.Get("x-amz-request-id") - } - if errResp.HostID == "" { - errResp.HostID = resp.Header.Get("x-amz-id-2") - } - if errResp.Region == "" { - errResp.Region = resp.Header.Get("x-amz-bucket-region") - } - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) - } - - return errResp -} - -// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. -func ErrTransferAccelerationBucket(bucketName string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", - BucketName: bucketName, - } -} - -// ErrEntityTooLarge - Input size is larger than supported maximum. -func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooLarge", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrEntityTooSmall - Input size is smaller than supported minimum. -func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooSmall", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrUnexpectedEOF - Unexpected end of file reached. -func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "UnexpectedEOF", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrInvalidBucketName - Invalid bucket name response. 
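-// This and the error helpers below are synthesized client-side before
-// any request is sent, which is why they carry the fixed placeholder
-// RequestID "minio" rather than a server-assigned one.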
-func ErrInvalidBucketName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", - } -} - -// ErrInvalidObjectName - Invalid object name response. -func ErrInvalidObjectName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchKey", - Message: message, - RequestID: "minio", - } -} - -// ErrInvalidObjectPrefix - Invalid object prefix response is -// similar to object name response. -var ErrInvalidObjectPrefix = ErrInvalidObjectName - -// ErrInvalidArgument - Invalid argument response. -func ErrInvalidArgument(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: message, - RequestID: "minio", - } -} - -// ErrNoSuchBucketPolicy - No Such Bucket Policy response -// The specified bucket does not have a bucket policy. -func ErrNoSuchBucketPolicy(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchBucketPolicy", - Message: message, - RequestID: "minio", - } -} - -// ErrAPINotSupported - API not supported response -// The specified API call is not supported -func ErrAPINotSupported(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotImplemented, - Code: "APINotSupported", - Message: message, - RequestID: "minio", - } -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go b/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go deleted file mode 100644 index a24d03e37..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// GetBucketLifecycle - get bucket lifecycle. -func (c Client) GetBucketLifecycle(bucketName string) (string, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketLifecycle, err := c.getBucketLifecycle(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchLifecycleConfiguration" { - return "", nil - } - return "", err - } - return bucketLifecycle, nil -} - -// Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute GET on bucket to get lifecycle. 
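-	// The empty-valued "lifecycle" query parameter selects the
-	// bucket's lifecycle sub-resource, mirroring the S3
-	// GetBucketLifecycleConfiguration API.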
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - lifecycle := string(bucketLifecycleBuf) - return lifecycle, err -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go deleted file mode 100644 index ea809492a..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "net/url" -) - -type accessControlPolicy struct { - Owner struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - } `xml:"Owner"` - AccessControlList struct { - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - } `xml:"AccessControlList"` -} - -//GetObjectACL get object ACLs -func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { - - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: url.Values{ - "acl": []string{""}, - }, - }) - if err != nil { - return nil, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - - res := &accessControlPolicy{} - - if err := xmlDecoder(resp.Body, res); err != nil { - return nil, err - } - - objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) - if err != nil { - return nil, err - } - - cannedACL := getCannedACL(res) - if cannedACL != "" { - objInfo.Metadata.Add("X-Amz-Acl", cannedACL) - return &objInfo, nil - } - - grantACL := getAmzGrantACL(res) - for k, v := range grantACL { - objInfo.Metadata[k] = v - } - - return &objInfo, nil -} - -func getCannedACL(aCPolicy *accessControlPolicy) string { - grants := aCPolicy.AccessControlList.Grant - - switch { - case len(grants) == 1: - if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { - return "private" - } - case len(grants) == 2: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return "authenticated-read" - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - return "public-read" - } - if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { - 
return "bucket-owner-read" - } - } - case len(grants) == 3: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - return "public-read-write" - } - } - } - return "" -} - -func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { - grants := aCPolicy.AccessControlList.Grant - res := map[string][]string{} - - for _, g := range grants { - switch { - case g.Permission == "READ": - res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) - case g.Permission == "WRITE": - res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) - case g.Permission == "READ_ACP": - res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) - case g.Permission == "WRITE_ACP": - res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) - case g.Permission == "FULL_CONTROL": - res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) - } - } - return res -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-context.go b/vendor/github.com/minio/minio-go/v6/api-get-object-context.go deleted file mode 100644 index 3c3afd059..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-object-context.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "context" - -// GetObjectWithContext - returns an seekable, readable object. -// The options can be used to specify the GET request further. -func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(ctx, bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-file.go b/vendor/github.com/minio/minio-go/v6/api-get-object-file.go deleted file mode 100644 index 0684b2b3a..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-object-file.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "os" - "path/filepath" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// FGetObjectWithContext - download contents of an object to a local file. 
-// The options can be used to specify the GET request further. -func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) -} - -// FGetObject - download contents of an object to a local file. -func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} - -// fGetObjectWithContext - fgetObject wrapper function with context -func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Verify if destination already exists. - st, err := os.Stat(filePath) - if err == nil { - // If the destination exists and is a directory. - if st.IsDir() { - return ErrInvalidArgument("fileName is a directory.") - } - } - - // Proceed if file does not exist. return for all other errors. - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - // Extract top level directory. - objectDir, _ := filepath.Split(filePath) - if objectDir != "" { - // Create any missing top level directories. - if err := os.MkdirAll(objectDir, 0700); err != nil { - return err - } - } - - // Gather md5sum. - objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - return err - } - - // Write to a temporary file "fileName.part.minio" before saving. - filePartPath := filePath + objectStat.ETag + ".part.minio" - - // If exists, open in append mode. If not create it as a part file. - filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - return err - } - - // Issue Stat to get the current offset. - st, err = filePart.Stat() - if err != nil { - return err - } - - // Initialize get object request headers to set the - // appropriate range offsets to read from. - if st.Size() > 0 { - opts.SetRange(st.Size(), 0) - } - - // Seek to current position for incoming reader. - objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - return err - } - - // Write to the part file. - if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { - return err - } - - // Close the file before rename, this is specifically needed for Windows users. - if err = filePart.Close(); err != nil { - return err - } - - // Safely completed. Now commit by renaming to actual filename. - if err = os.Rename(filePartPath, filePath); err != nil { - return err - } - - // Return. - return nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object.go b/vendor/github.com/minio/minio-go/v6/api-get-object.go deleted file mode 100644 index 136782806..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-object.go +++ /dev/null @@ -1,661 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// GetObject - returns an seekable, readable object. -func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) -} - -// GetObject wrapper function that accepts a request context -func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - var httpReader io.ReadCloser - var objectInfo ObjectInfo - var err error - - // Create request channel. - reqCh := make(chan getRequest) - // Create response channel. - resCh := make(chan getResponse) - // Create done channel. - doneCh := make(chan struct{}) - - // This routine feeds partial object data as and when the caller reads. - go func() { - defer close(reqCh) - defer close(resCh) - - // Used to verify if etag of object has changed since last read. - var etag string - - // Loop through the incoming control messages and read data. - for { - select { - // When the done channel is closed exit our routine. - case <-doneCh: - // Close the http response body before returning. - // This ends the connection with the server. - if httpReader != nil { - httpReader.Close() - } - return - - // Gather incoming request. - case req := <-reqCh: - // If this is the first request we may not need to do a getObject request yet. - if req.isFirstReq { - // First request is a Read/ReadAt. - if req.isReadOp { - // Differentiate between wanting the whole object and just a range. - if req.isReadAt { - // If this is a ReadAt request only get the specified range. - // Range is set with respect to the offset and length of the buffer requested. - // Do not set objectInfo from the first readAt request because it will not get - // the whole object. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{Error: err} - return - } - etag = objectInfo.ETag - // Read at least firstReq.Buffer bytes, if not we have - // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) - if size > 0 && err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - Size: int(size), - Error: err, - didRead: true, - } - } else { - // First request is a Stat or Seek call. - // Only need to run a StatObject until an actual Read or ReadAt request comes through. - - // Remove range header if already set, for stat Operations to get original file size. 
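-						// (A stale Range header left over from an
-						// earlier ranged read would otherwise make the
-						// stat report only that range's length.)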
- delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the go-routine. - return - } - etag = objectInfo.ETag - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } - } else if req.settingObjectInfo { // Request is just to get objectInfo. - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - if etag != "" { - opts.SetMatchETag(etag) - } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the goroutine. - return - } - // Send back the objectInfo. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } else { - // Offset changes fetch the new object at an Offset. - // Because the httpReader may not be set by the first - // request if it was a stat or seek it must be checked - // if the object has been read or not to only initialize - // new ones when they haven't been already. - // All readAt requests are new requests. - if req.DidOffsetChange || !req.beenRead { - if etag != "" { - opts.SetMatchETag(etag) - } - if httpReader != nil { - // Close previously opened http reader. - httpReader.Close() - } - // If this request is a readAt only get the specified range. - if req.isReadAt { - // Range is set with respect to the offset and length of the buffer requested. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { // Range is set with respect to the offset. - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{ - Error: err, - } - return - } - } - - // Read at least req.Buffer bytes, if not we have - // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) - if err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - // Reply back how much was read. - resCh <- getResponse{ - Size: int(size), - Error: err, - didRead: true, - objectInfo: objectInfo, - } - } - } - } - }() - - // Create a newObject through the information sent back by reqCh. - return newObject(reqCh, resCh, doneCh), nil -} - -// get request message container to communicate with internal -// go-routine. -type getRequest struct { - Buffer []byte - Offset int64 // readAt offset. - DidOffsetChange bool // Tracks the offset changes for Seek requests. - beenRead bool // Determines if this is the first time an object is being read. - isReadAt bool // Determines if this request is a request to a specific range - isReadOp bool // Determines if this request is a Read or Read/At request. - isFirstReq bool // Determines if this request is the first time an object is being accessed. - settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. -} - -// get response message container to reply back for the request. -type getResponse struct { - Size int - Error error - didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. - objectInfo ObjectInfo // Used for the first request. -} - -// Object represents an open object. It implements -// Reader, ReaderAt, Seeker, Closer for a HTTP stream. -type Object struct { - // Mutex. - mutex *sync.Mutex - - // User allocated and defined. 
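-	// (Wired up once by newObject; reqCh and resCh are serviced by the
-	// feeder goroutine started in getObjectWithContext, and doneCh is
-	// closed by Close to terminate that goroutine.)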
- reqCh chan<- getRequest - resCh <-chan getResponse - doneCh chan<- struct{} - currOffset int64 - objectInfo ObjectInfo - - // Ask lower level to initiate data fetching based on currOffset - seekData bool - - // Keeps track of closed call. - isClosed bool - - // Keeps track of if this is the first call. - isStarted bool - - // Previous error saved for future calls. - prevErr error - - // Keeps track of if this object has been read yet. - beenRead bool - - // Keeps track of if objectInfo has been set yet. - objectInfoSet bool -} - -// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. -// Returns back the size of the buffer read, if anything was read, as well -// as any error encountered. For all first requests sent on the object -// it is also responsible for sending back the objectInfo. -func (o *Object) doGetRequest(request getRequest) (getResponse, error) { - o.reqCh <- request - response := <-o.resCh - - // Return any error to the top level. - if response.Error != nil { - return response, response.Error - } - - // This was the first request. - if !o.isStarted { - // The object has been operated on. - o.isStarted = true - } - // Set the objectInfo if the request was not readAt - // and it hasn't been set before. - if !o.objectInfoSet && !request.isReadAt { - o.objectInfo = response.objectInfo - o.objectInfoSet = true - } - // Set beenRead only if it has not been set before. - if !o.beenRead { - o.beenRead = response.didRead - } - // Data are ready on the wire, no need to reinitiate connection in lower level - o.seekData = false - - return response, nil -} - -// setOffset - handles the setting of offsets for -// Read/ReadAt/Seek requests. -func (o *Object) setOffset(bytesRead int64) error { - // Update the currentOffset. - o.currOffset += bytesRead - - if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { - return io.EOF - } - return nil -} - -// Read reads up to len(b) bytes into b. It returns the number of -// bytes read (0 <= n <= len(b)) and any error encountered. Returns -// io.EOF upon end of file. -func (o *Object) Read(b []byte) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is previous error saved from previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // Create a new request. - readReq := getRequest{ - isReadOp: true, - beenRead: o.beenRead, - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readReq.isFirstReq = true - } - - // Ask to establish a new data fetch routine based on seekData flag - readReq.DidOffsetChange = o.seekData - readReq.Offset = o.currOffset - - // Send and receive from the first request. - response, err := o.doGetRequest(readReq) - if err != nil && err != io.EOF { - // Save the error for future calls. - o.prevErr = err - return response.Size, err - } - - // Bytes read. - bytesRead := int64(response.Size) - - // Set the new offset. - oerr := o.setOffset(bytesRead) - if oerr != nil { - // Save the error for future calls. - o.prevErr = oerr - return response.Size, oerr - } - - // Return the response. - return response.Size, err -} - -// Stat returns the ObjectInfo structure describing Object. -func (o *Object) Stat() (ObjectInfo, error) { - if o == nil { - return ObjectInfo{}, ErrInvalidArgument("Object is nil") - } - // Locking. 
- o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { - return ObjectInfo{}, o.prevErr - } - - // This is the first request. - if !o.isStarted || !o.objectInfoSet { - // Send the request and get the response. - _, err := o.doGetRequest(getRequest{ - isFirstReq: !o.isStarted, - settingObjectInfo: !o.objectInfoSet, - }) - if err != nil { - o.prevErr = err - return ObjectInfo{}, err - } - } - - return o.objectInfo, nil -} - -// ReadAt reads len(b) bytes from the File starting at byte offset -// off. It returns the number of bytes read and the error, if any. -// ReadAt always returns a non-nil error when n < len(b). At end of -// file, that error is io.EOF. -func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is error which was saved in previous operation. - if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { - return 0, o.prevErr - } - - // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. - o.currOffset = offset - - // Can only compare offsets to size when size has been set. - if o.objectInfoSet { - // If offset is negative than we return io.EOF. - // If offset is greater than or equal to object size we return io.EOF. - if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { - return 0, io.EOF - } - } - - // Create the new readAt request. - readAtReq := getRequest{ - isReadOp: true, - isReadAt: true, - DidOffsetChange: true, // Offset always changes. - beenRead: o.beenRead, // Set if this is the first request to try and read. - Offset: offset, // Set the offset. - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readAtReq.isFirstReq = true - } - - // Send and receive from the first request. - response, err := o.doGetRequest(readAtReq) - if err != nil && err != io.EOF { - // Save the error. - o.prevErr = err - return response.Size, err - } - // Bytes read. - bytesRead := int64(response.Size) - // There is no valid objectInfo yet - // to compare against for EOF. - if !o.objectInfoSet { - // Update the currentOffset. - o.currOffset += bytesRead - } else { - // If this was not the first request update - // the offsets and compare against objectInfo - // for EOF. - oerr := o.setOffset(bytesRead) - if oerr != nil { - o.prevErr = oerr - return response.Size, oerr - } - } - return response.Size, err -} - -// Seek sets the offset for the next Read or Write to offset, -// interpreted according to whence: 0 means relative to the -// origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. -// Seek returns the new offset and an error, if any. -// -// Seeking to a negative offset is an error. Seeking to any positive -// offset is legal, subsequent io operations succeed until the -// underlying object is not closed. -func (o *Object) Seek(offset int64, whence int) (n int64, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // At EOF seeking is legal allow only io.EOF, for any other errors we return. - if o.prevErr != nil && o.prevErr != io.EOF { - return 0, o.prevErr - } - - // Negative offset is valid for whence of '2'. 
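-	// (Whence 2 is io.SeekEnd: the offset counts backwards from the
-	// end of the object, so a negative value is the normal case.)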
- if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) - } - - // This is the first request. So before anything else - // get the ObjectInfo. - if !o.isStarted || !o.objectInfoSet { - // Create the new Seek request. - seekReq := getRequest{ - isReadOp: false, - Offset: offset, - isFirstReq: true, - } - // Send and receive from the seek request. - _, err := o.doGetRequest(seekReq) - if err != nil { - // Save the error. - o.prevErr = err - return 0, err - } - } - - // Switch through whence. - switch whence { - default: - return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) - case 0: - if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset = offset - case 1: - if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset += offset - case 2: - // If we don't know the object size return an error for io.SeekEnd - if o.objectInfo.Size < 0 { - return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") - } - // Seeking to positive offset is valid for whence '2', but - // since we are backing a Reader we have reached 'EOF' if - // offset is positive. - if offset > 0 { - return 0, io.EOF - } - // Seeking to negative position not allowed for whence. - if o.objectInfo.Size+offset < 0 { - return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) - } - o.currOffset = o.objectInfo.Size + offset - } - // Reset the saved error since we successfully seeked, let the Read - // and ReadAt decide. - if o.prevErr == io.EOF { - o.prevErr = nil - } - - // Ask lower level to fetch again from source - o.seekData = true - - // Return the effective offset. - return o.currOffset, nil -} - -// Close - The behavior of Close after the first call returns error -// for subsequent Close() calls. -func (o *Object) Close() (err error) { - if o == nil { - return ErrInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // if already closed return an error. - if o.isClosed { - return o.prevErr - } - - // Close successfully. - close(o.doneCh) - - // Save for future operations. - errMsg := "Object is already closed. Bad file descriptor." - o.prevErr = errors.New(errMsg) - // Save here that we closed done channel successfully. - o.isClosed = true - return nil -} - -// newObject instantiates a new *minio.Object* -// ObjectInfo will be set by setObjectInfo -func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { - return &Object{ - mutex: &sync.Mutex{}, - reqCh: reqCh, - resCh: resCh, - doneCh: doneCh, - } -} - -// getObject - retrieve object from Object Storage. -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - // Validate input arguments. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, nil, err - } - - // Execute GET on objectName. 
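-	// opts.Header() carries any Range and conditional headers
-	// (If-Match, If-Modified-Since, ...) set by the caller, so this
-	// single request path serves both full-object and ranged reads.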
- resp, err := c.executeMethod(ctx, "GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: opts.Header(), - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - return nil, ObjectInfo{}, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Trim off the odd double quotes from ETag in the beginning and end. - md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - md5sum = strings.TrimSuffix(md5sum, "\"") - - // Parse the date. - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - msg := "Last-Modified time format not recognized. " + reportIssue - return nil, ObjectInfo{}, nil, ErrorResponse{ - Code: "InternalError", - Message: msg, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - } - - // Get content-type. - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - objectStat := ObjectInfo{ - ETag: md5sum, - Key: objectName, - Size: resp.ContentLength, - LastModified: date, - ContentType: contentType, - // Extract only the relevant header keys describing the object. - // following function filters out a list of standard set of keys - // which are not part of object metadata. - Metadata: extractObjMetadata(resp.Header), - } - - // do not close body here, caller will close - return resp.Body, objectStat, resp.Header, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-options.go b/vendor/github.com/minio/minio-go/v6/api-get-options.go deleted file mode 100644 index 538fd1a05..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-options.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "net/http" - "time" - - "github.com/minio/minio-go/v6/pkg/encrypt" -) - -// GetObjectOptions are used to specify additional headers or options -// during GET requests. -type GetObjectOptions struct { - headers map[string]string - ServerSideEncryption encrypt.ServerSide -} - -// StatObjectOptions are used to specify additional headers or options -// during GET info/stat requests. -type StatObjectOptions struct { - GetObjectOptions -} - -// Header returns the http.Header representation of the GET options. -func (o GetObjectOptions) Header() http.Header { - headers := make(http.Header, len(o.headers)) - for k, v := range o.headers { - headers.Set(k, v) - } - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// Set adds a key value pair to the options. 
The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *GetObjectOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(map[string]string) - } - o.headers[http.CanonicalHeaderKey(key)] = value -} - -// SetMatchETag - set match etag. -func (o *GetObjectOptions) SetMatchETag(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - o.Set("If-Match", "\""+etag+"\"") - return nil -} - -// SetMatchETagExcept - set match etag except. -func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - o.Set("If-None-Match", "\""+etag+"\"") - return nil -} - -// SetUnmodified - set unmodified time since. -func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetModified - set modified time since. -func (o *GetObjectOptions) SetModified(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetRange - set the start and end offset of the object to be read. -// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. -func (o *GetObjectOptions) SetRange(start, end int64) error { - switch { - case start == 0 && end < 0: - // Read last '-end' bytes. `bytes=-N`. - o.Set("Range", fmt.Sprintf("bytes=%d", end)) - case 0 < start && end == 0: - // Read everything starting from offset - // 'start'. `bytes=N-`. - o.Set("Range", fmt.Sprintf("bytes=%d-", start)) - case 0 <= start && start <= end: - // Read everything starting at 'start' till the - // 'end'. `bytes=N-M` - o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) - default: - // All other cases such as - // bytes=-3- - // bytes=5-3 - // bytes=-2-4 - // bytes=-3-0 - // bytes=-3--2 - // are invalid. - return ErrInvalidArgument( - fmt.Sprintf( - "Invalid range specified: start=%d end=%d", - start, end)) - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-policy.go b/vendor/github.com/minio/minio-go/v6/api-get-policy.go deleted file mode 100644 index bc1d10530..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-get-policy.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// GetBucketPolicy - get bucket policy at a given path. -func (c Client) GetBucketPolicy(bucketName string) (string, error) { - // Input validation. 
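// A short sketch combining the GetObjectOptions setters defined above
// (illustrative only; assumes `client`, bucket and object names, and the
// ETag value; imports "log"):
//
//	opts := minio.GetObjectOptions{}
//	// First KiB of the object: `bytes=0-1023` per RFC 7233.
//	if err := opts.SetRange(0, 1023); err != nil {
//		log.Fatalln(err)
//	}
//	// Fail with 412 Precondition Failed if the object no longer has this ETag.
//	if err := opts.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e"); err != nil {
//		log.Fatalln(err)
//	}
//	reader, err := client.GetObject("mybucket", "myobject", opts)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer reader.Close()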
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketPolicy, err := c.getBucketPolicy(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { - return "", nil - } - return "", err - } - return bucketPolicy, nil -} - -// Request server for current bucket policy. -func (c Client) getBucketPolicy(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - policy := string(bucketPolicyBuf) - return policy, err -} diff --git a/vendor/github.com/minio/minio-go/v6/api-list.go b/vendor/github.com/minio/minio-go/v6/api-list.go deleted file mode 100644 index 09c1565cb..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-list.go +++ /dev/null @@ -1,811 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// ListBuckets list all buckets owned by this authenticated user. -// -// This call requires explicit authentication, no anonymous requests are -// allowed for listing buckets. -// -// api := client.New(....) -// for message := range api.ListBuckets() { -// fmt.Println(message) -// } -// -func (c Client) ListBuckets() ([]BucketInfo, error) { - // Execute GET on service. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, "", "") - } - } - listAllMyBucketsResult := listAllMyBucketsResult{} - err = xmlDecoder(resp.Body, &listAllMyBucketsResult) - if err != nil { - return nil, err - } - return listAllMyBucketsResult.Buckets.Bucket, nil -} - -/// Bucket Read Operations. - -// ListObjectsV2WithMetadata lists all objects matching the objectPrefix -// from the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. This call adds -// UserMetadata information as well for each object. -// -// This is a MinIO extension, this will not work against other S3 -// compatible object storage vendors. 
-// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recursively list all objects in 'mytestbucket' -// recursive := true -// // Add metadata -// metadata := true -// for message := range api.ListObjectsV2WithMetadata("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjectsV2WithMetadata(bucketName, objectPrefix string, recursive bool, - doneCh <-chan struct{}) <-chan ObjectInfo { - return c.listObjectsV2(bucketName, objectPrefix, recursive, true, doneCh) -} - -func (c Client) listObjectsV2(bucketName, objectPrefix string, recursive, metadata bool, doneCh <-chan struct{}) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if recursive { - // If recursive we do not delimit. - delimiter = "" - } - - // Return object owner information by default - fetchOwner := true - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer close(objectStatCh) - // Save continuationToken for next request. - var continuationToken string - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, - fetchOwner, metadata, delimiter, 0, "") - if err != nil { - objectStatCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. - for _, object := range result.Contents { - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // If continuation token present, save it for next request. - if result.NextContinuationToken != "" { - continuationToken = result.NextContinuationToken - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -// ListObjectsV2 lists all objects matching the objectPrefix from -// the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. 
-// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recursively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { - return c.listObjectsV2(bucketName, objectPrefix, recursive, false, doneCh) -} - -// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?continuation-token - Used to continue iterating over a set of objects -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -// ?start-after - Specifies the key to start after when listing objects in a bucket. -// ?metadata - Specifies if we want metadata for the objects as part of list operation. -func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketV2Result{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketV2Result{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Always set list-type in ListObjects V2 - urlValues.Set("list-type", "2") - - if metadata { - urlValues.Set("metadata", "true") - } - - // Always set encoding-type in ListObjects V2 - urlValues.Set("encoding-type", "url") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set continuation token - if continuationToken != "" { - urlValues.Set("continuation-token", continuationToken) - } - - // Fetch owner when listing - if fetchOwner { - urlValues.Set("fetch-owner", "true") - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Set start-after - if startAfter != "" { - urlValues.Set("start-after", startAfter) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketV2Result{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode listBuckets XML. - listBucketResult := ListBucketV2Result{} - if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { - return listBucketResult, err - } - - // This is an additional verification check to make - // sure proper responses are received. 
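// A consumer sketch for ListObjectsV2 above (illustrative only; assumes
// `client` and imports "fmt", "log"). Closing doneCh stops the internal
// listing goroutine early:
//
//	doneCh := make(chan struct{})
//	defer close(doneCh)
//
//	for object := range client.ListObjectsV2("mybucket", "photos/", true, doneCh) {
//		if object.Err != nil {
//			log.Fatalln(object.Err)
//		}
//		fmt.Println(object.Key, object.Size)
//	}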
-	if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
-		return listBucketResult, errors.New("Truncated response should have continuation token set")
-	}
-
-	for i, obj := range listBucketResult.Contents {
-		listBucketResult.Contents[i].Key, err = url.QueryUnescape(obj.Key)
-		if err != nil {
-			return listBucketResult, err
-		}
-	}
-
-	for i, obj := range listBucketResult.CommonPrefixes {
-		listBucketResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix)
-		if err != nil {
-			return listBucketResult, err
-		}
-	}
-
-	// Success.
-	return listBucketResult, nil
-}
-
-// ListObjects - (List Objects) - List some objects or all recursively.
-//
-// ListObjects lists all objects matching the objectPrefix from
-// the specified bucket. If recursion is enabled it would list
-// all subdirectories and all their contents.
-//
-// Your input parameters are just bucketName, objectPrefix, recursive
-// and a done channel for pro-actively closing the internal go
-// routine. If you enable recursive as 'true' this function will
-// return all the objects in a given bucket name and object
-// prefix.
-//
-//   api := client.New(....)
-//   // Create a done channel.
-//   doneCh := make(chan struct{})
-//   defer close(doneCh)
-//   // Recursively list all objects in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
-//       fmt.Println(message)
-//   }
-//
-func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
-	// Allocate new list objects channel.
-	objectStatCh := make(chan ObjectInfo, 1)
-	// Default listing is delimited at "/"
-	delimiter := "/"
-	if recursive {
-		// If recursive we do not delimit.
-		delimiter = ""
-	}
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-
-	// Initiate list objects goroutine here.
-	go func(objectStatCh chan<- ObjectInfo) {
-		defer close(objectStatCh)
-		// Save marker for next request.
-		var marker string
-		for {
-			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 0)
-			if err != nil {
-				objectStatCh <- ObjectInfo{
-					Err: err,
-				}
-				return
-			}
-
-			// If contents are available loop through and send over channel.
-			for _, object := range result.Contents {
-				// Save the marker.
-				marker = object.Key
-				select {
-				// Send object content.
-				case objectStatCh <- object:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// Send all common prefixes if any.
-			// NOTE: prefixes are only present if the request is delimited.
-			for _, obj := range result.CommonPrefixes {
-				select {
-				// Send object prefixes.
-				case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// If next marker present, save it for next request.
-			if result.NextMarker != "" {
-				marker = result.NextMarker
-			}
-
-			// Listing ends when the result is not truncated; return right here.
-			if !result.IsTruncated {
-				return
-			}
-		}
-	}(objectStatCh)
-	return objectStatCh
-}
-
-// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
-// request parameters :-
-// ---------
-// ?marker - Specifies the key to start with when listing objects in a bucket.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ListBucketResult{}, err
-	}
-	// Validate object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return ListBucketResult{}, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-
-	// Set object prefix, prefix value to be set to empty is okay.
-	urlValues.Set("prefix", objectPrefix)
-
-	// Set delimiter, delimiter value to be set to empty is okay.
-	urlValues.Set("delimiter", delimiter)
-
-	// Set object marker.
-	if objectMarker != "" {
-		urlValues.Set("marker", objectMarker)
-	}
-
-	// Set max keys.
-	if maxkeys > 0 {
-		urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
-	}
-
-	// Always set encoding-type
-	urlValues.Set("encoding-type", "url")
-
-	// Execute GET on bucket to list objects.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListBucketResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	// Decode ListBucketResult XML.
-	listBucketResult := ListBucketResult{}
-	err = xmlDecoder(resp.Body, &listBucketResult)
-	if err != nil {
-		return listBucketResult, err
-	}
-
-	for i, obj := range listBucketResult.Contents {
-		listBucketResult.Contents[i].Key, err = url.QueryUnescape(obj.Key)
-		if err != nil {
-			return listBucketResult, err
-		}
-	}
-
-	for i, obj := range listBucketResult.CommonPrefixes {
-		listBucketResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix)
-		if err != nil {
-			return listBucketResult, err
-		}
-	}
-
-	if listBucketResult.NextMarker != "" {
-		listBucketResult.NextMarker, err = url.QueryUnescape(listBucketResult.NextMarker)
-		if err != nil {
-			return listBucketResult, err
-		}
-	}
-
-	return listBucketResult, nil
-}
-
-// ListIncompleteUploads - List incompletely uploaded multipart objects.
-//
-// ListIncompleteUploads lists all incomplete uploads matching the
-// objectPrefix from the specified bucket. If recursion is enabled
-// it would list all subdirectories and all their contents.
-//
-// Your input parameters are just bucketName, objectPrefix, recursive
-// and a done channel to pro-actively close the internal go routine.
-// If you enable recursive as 'true' this function will return all
-// the multipart objects in a given bucket name.
-//
-//   api := client.New(....)
-//   // Create a done channel.
-//   doneCh := make(chan struct{})
-//   defer close(doneCh)
-//   // Recursively list all objects in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
-//       fmt.Println(message)
-//   }
-//
-func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
-	// Turn on size aggregation of individual parts.
-	isAggregateSize := true
-	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
-}
-
-// listIncompleteUploads lists all incomplete uploads.
-func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
-	// Allocate channel for multipart uploads.
-	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
-	// Delimiter is set to "/" by default.
-	delimiter := "/"
-	if recursive {
-		// If recursive do not delimit.
-		delimiter = ""
-	}
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartInfo{
-			Err: err,
-		}
-		return objectMultipartStatCh
-	}
-	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartInfo{
-			Err: err,
-		}
-		return objectMultipartStatCh
-	}
-	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
-		defer close(objectMultipartStatCh)
-		// object and upload ID marker for future requests.
-		var objectMarker string
-		var uploadIDMarker string
-		for {
-			// list all multipart uploads.
-			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
-			if err != nil {
-				objectMultipartStatCh <- ObjectMultipartInfo{
-					Err: err,
-				}
-				return
-			}
-			objectMarker = result.NextKeyMarker
-			uploadIDMarker = result.NextUploadIDMarker
-
-			// Send all multipart uploads.
-			for _, obj := range result.Uploads {
-				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
-				if aggregateSize {
-					// Get total multipart size.
-					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
-					if err != nil {
-						objectMultipartStatCh <- ObjectMultipartInfo{
-							Err: err,
-						}
-						continue
-					}
-				}
-				select {
-				// Send individual uploads here.
-				case objectMultipartStatCh <- obj:
-				// If done channel signals, return here.
-				case <-doneCh:
-					return
-				}
-			}
-			// Send all common prefixes if any.
-			// NOTE: prefixes are only present if the request is delimited.
-			for _, obj := range result.CommonPrefixes {
-				select {
-				// Send delimited prefixes here.
-				case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
-				// If done channel signals, return here.
-				case <-doneCh:
-					return
-				}
-			}
-			// Listing ends if result not truncated, return right here.
-			if !result.IsTruncated {
-				return
-			}
-		}
-	}(objectMultipartStatCh)
-	// return.
-	return objectMultipartStatCh
-}
-
-// listMultipartUploadsQuery - (List Multipart Uploads).
-// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request parameters :-
-// ---------
-// ?key-marker - Specifies the multipart upload after which listing should begin.
-// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set uploads. - urlValues.Set("uploads", "") - // Set object key marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - // Set upload id marker. - if uploadIDMarker != "" { - urlValues.Set("upload-id-marker", uploadIDMarker) - } - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // maxUploads should be 1000 or less. - if maxUploads > 0 { - // Set max-uploads. - urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) - } - - // Execute GET on bucketName to list multipart uploads. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListMultipartUploadsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode response body. - listMultipartUploadsResult := ListMultipartUploadsResult{} - err = xmlDecoder(resp.Body, &listMultipartUploadsResult) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextKeyMarker, err = url.QueryUnescape(listMultipartUploadsResult.NextKeyMarker) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextUploadIDMarker, err = url.QueryUnescape(listMultipartUploadsResult.NextUploadIDMarker) - if err != nil { - return listMultipartUploadsResult, err - } - - for i, obj := range listMultipartUploadsResult.Uploads { - listMultipartUploadsResult.Uploads[i].Key, err = url.QueryUnescape(obj.Key) - if err != nil { - return listMultipartUploadsResult, err - } - } - - for i, obj := range listMultipartUploadsResult.CommonPrefixes { - listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix) - if err != nil { - return listMultipartUploadsResult, err - } - } - - return listMultipartUploadsResult, nil -} - -// listObjectParts list all object parts recursively. -func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { - // Part number marker for the next batch of request. - var nextPartNumberMarker int - partsInfo = make(map[int]ObjectPart) - for { - // Get list of uploaded parts a maximum of 1000 per request. - listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) - if err != nil { - return nil, err - } - // Append to parts info. - for _, part := range listObjPartsResult.ObjectParts { - // Trim off the odd double quotes from ETag in the beginning and end. 
-			part.ETag = strings.TrimPrefix(part.ETag, "\"")
-			part.ETag = strings.TrimSuffix(part.ETag, "\"")
-			partsInfo[part.PartNumber] = part
-		}
-		// Keep part number marker, for the next iteration.
-		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
-		// Listing ends when the result is not truncated; break right here.
-		if !listObjPartsResult.IsTruncated {
-			break
-		}
-	}
-
-	// Return all the parts.
-	return partsInfo, nil
-}
-
-// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
-func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
-	var uploadIDs []string
-	// Make list incomplete uploads recursive.
-	isRecursive := true
-	// Turn off size aggregation of individual parts, in this request.
-	isAggregateSize := false
-	// Create done channel to cleanup the routine.
-	doneCh := make(chan struct{})
-	defer close(doneCh)
-	// List all incomplete uploads.
-	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
-		if mpUpload.Err != nil {
-			return nil, mpUpload.Err
-		}
-		if objectName == mpUpload.Key {
-			uploadIDs = append(uploadIDs, mpUpload.UploadID)
-		}
-	}
-	// Return all the matching upload ids.
-	return uploadIDs, nil
-}
-
-// getTotalMultipartSize - calculate the total uploaded size for a given multipart object.
-func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
-	// Iterate over all parts and aggregate the size.
-	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
-	if err != nil {
-		return 0, err
-	}
-	for _, partInfo := range partsInfo {
-		size += partInfo.Size
-	}
-	return size, nil
-}
-
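A short consumer sketch for the public entry point of this machinery, ListIncompleteUploads (illustrative only; assumes an initialized *minio.Client named `client` and imports "fmt", "log"). Size on each ObjectMultipartInfo is the aggregate computed by getTotalMultipartSize above:

    doneCh := make(chan struct{})
    defer close(doneCh)

    for upload := range client.ListIncompleteUploads("mybucket", "backups/", true, doneCh) {
        if upload.Err != nil {
            log.Fatalln(upload.Err)
        }
        fmt.Println(upload.Key, upload.UploadID, upload.Size)
    }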
-// listObjectPartsQuery (List Parts query)
-// - lists some or all (up to 1000) parts that have been uploaded
-//   for a specific multipart upload
-//
-// You can use the request parameters as selection criteria to return
-// a subset of the uploads in a bucket, request parameters :-
-// ---------
-// ?part-number-marker - Specifies the part after which listing should
-// begin.
-// ?max-parts - Maximum parts to be listed per request.
-func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
-	// Get resources properly escaped and lined up before using them in http request.
-	urlValues := make(url.Values)
-	// Set part number marker.
-	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
-	// Set upload id.
-	urlValues.Set("uploadId", uploadID)
-
-	// maxParts should be 1000 or less.
-	if maxParts > 0 {
-		// Set max parts.
-		urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
-	}
-
-	// Execute GET on objectName to get list of parts.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListObjectPartsResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	// Decode list object parts XML.
-	listObjectPartsResult := ListObjectPartsResult{}
-	err = xmlDecoder(resp.Body, &listObjectPartsResult)
-	if err != nil {
-		return listObjectPartsResult, err
-	}
-	return listObjectPartsResult, nil
-}
diff --git a/vendor/github.com/minio/minio-go/v6/api-notification.go b/vendor/github.com/minio/minio-go/v6/api-notification.go
deleted file mode 100644
index 0480c21eb..000000000
--- a/vendor/github.com/minio/minio-go/v6/api-notification.go
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bufio"
-	"context"
-	"encoding/json"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go/v6/pkg/s3utils"
-)
-
-// GetBucketNotification - get bucket notification at a given path.
-func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return BucketNotification{}, err
-	}
-	notification, err := c.getBucketNotification(bucketName)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return notification, nil
-}
-
-// Request server for notification rules.
-func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
-	urlValues := make(url.Values)
-	urlValues.Set("notification", "")
-
-	// Execute GET on bucket to read the notification configuration.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return processBucketNotificationResponse(bucketName, resp)
-
-}
-
-// processes the GetNotification http response from the server.
-func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
-	if resp.StatusCode != http.StatusOK {
-		errResponse := httpRespToErrorResponse(resp, bucketName, "")
-		return BucketNotification{}, errResponse
-	}
-	var bucketNotification BucketNotification
-	err := xmlDecoder(resp.Body, &bucketNotification)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return bucketNotification, nil
-}
-
-// identity represents the user id; this is a compliance field.
-type identity struct {
-	PrincipalID string `json:"principalId"`
-}
-
-// Notification event bucket metadata.
-type bucketMeta struct {
-	Name          string   `json:"name"`
-	OwnerIdentity identity `json:"ownerIdentity"`
-	ARN           string   `json:"arn"`
-}
-
-// Notification event object metadata.
-type objectMeta struct {
-	Key       string `json:"key"`
-	Size      int64  `json:"size,omitempty"`
-	ETag      string `json:"eTag,omitempty"`
-	VersionID string `json:"versionId,omitempty"`
-	Sequencer string `json:"sequencer"`
-}
-
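A minimal sketch of reading the current notification configuration with GetBucketNotification above (illustrative only; assumes `client` and imports "fmt", "log"):

    bn, err := client.GetBucketNotification("mybucket")
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Printf("%+v\n", bn)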
-// Notification event server specific metadata.
-type eventMeta struct {
-	SchemaVersion   string     `json:"s3SchemaVersion"`
-	ConfigurationID string     `json:"configurationId"`
-	Bucket          bucketMeta `json:"bucket"`
-	Object          objectMeta `json:"object"`
-}
-
-// sourceInfo represents information on the client that
-// triggered the event notification.
-type sourceInfo struct {
-	Host      string `json:"host"`
-	Port      string `json:"port"`
-	UserAgent string `json:"userAgent"`
-}
-
-// NotificationEvent represents an Amazon S3 bucket notification event.
-type NotificationEvent struct {
-	EventVersion      string            `json:"eventVersion"`
-	EventSource       string            `json:"eventSource"`
-	AwsRegion         string            `json:"awsRegion"`
-	EventTime         string            `json:"eventTime"`
-	EventName         string            `json:"eventName"`
-	UserIdentity      identity          `json:"userIdentity"`
-	RequestParameters map[string]string `json:"requestParameters"`
-	ResponseElements  map[string]string `json:"responseElements"`
-	S3                eventMeta         `json:"s3"`
-	Source            sourceInfo        `json:"source"`
-}
-
-// NotificationInfo represents the collection of notification events; it
-// additionally reports errors, if any, encountered while listening on
-// bucket notifications.
-type NotificationInfo struct {
-	Records []NotificationEvent
-	Err     error
-}
-
-// ListenBucketNotification - listen on bucket notifications.
-func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
-	notificationInfoCh := make(chan NotificationInfo, 1)
-	// Only success, start a routine to start reading line by line.
-	go func(notificationInfoCh chan<- NotificationInfo) {
-		defer close(notificationInfoCh)
-
-		// Validate the bucket name.
-		if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-			notificationInfoCh <- NotificationInfo{
-				Err: err,
-			}
-			return
-		}
-
-		// Check ARN partition to verify if listening bucket is supported
-		if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
-			notificationInfoCh <- NotificationInfo{
-				Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
-			}
-			return
-		}
-
-		// Continuously run and listen on bucket notification.
-		// Create a done channel to control 'ListObjects' go routine.
-		retryDoneCh := make(chan struct{}, 1)
-
-		// Indicate to our routine to exit cleanly upon return.
-		defer close(retryDoneCh)
-
-		// Prepare urlValues to pass into the request on every loop
-		urlValues := make(url.Values)
-		urlValues.Set("prefix", prefix)
-		urlValues.Set("suffix", suffix)
-		urlValues["events"] = events
-
-		// Wait on the jitter retry loop.
-		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
-			// Execute GET on bucket to listen on notifications.
-			resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-				bucketName:       bucketName,
-				queryValues:      urlValues,
-				contentSHA256Hex: emptySHA256Hex,
-			})
-			if err != nil {
-				notificationInfoCh <- NotificationInfo{
-					Err: err,
-				}
-				return
-			}
-
-			// Validate http response, upon error return quickly.
-			if resp.StatusCode != http.StatusOK {
-				errResponse := httpRespToErrorResponse(resp, bucketName, "")
-				notificationInfoCh <- NotificationInfo{
-					Err: errResponse,
-				}
-				return
-			}
-
-			// Initialize a new bufio scanner, to read line by line.
-			bio := bufio.NewScanner(resp.Body)
-
-			// Unmarshal each line, returns marshalled values.
-			for bio.Scan() {
-				var notificationInfo NotificationInfo
-				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
-					// Unexpected error during json unmarshal; send
-					// the error to the caller to act on as needed.
-					notificationInfoCh <- NotificationInfo{
-						Err: err,
-					}
-					closeResponse(resp)
-					continue
-				}
-				// Send notificationInfo
-				select {
-				case notificationInfoCh <- notificationInfo:
-				case <-doneCh:
-					closeResponse(resp)
-					return
-				}
-			}
-			if err = bio.Err(); err != nil {
-				notificationInfoCh <- NotificationInfo{
-					Err: err,
-				}
-			}
-			// Close current connection before looping further.
-			closeResponse(resp)
-		}
-	}(notificationInfoCh)
-
-	// Returns the notification info channel, for the caller to start reading from.
-	return notificationInfoCh
-}
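A sketch of consuming ListenBucketNotification above (illustrative only; assumes a `client` pointed at a MinIO endpoint, since the code rejects Amazon and Google endpoints, and imports "fmt", "log"):

    doneCh := make(chan struct{})
    defer close(doneCh)

    events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
    for info := range client.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
        if info.Err != nil {
            log.Fatalln(info.Err)
        }
        for _, record := range info.Records {
            fmt.Println(record.EventName, record.S3.Object.Key)
        }
    }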
diff --git a/vendor/github.com/minio/minio-go/v6/api-object-lock.go b/vendor/github.com/minio/minio-go/v6/api-object-lock.go
deleted file mode 100644
index c30ab3258..000000000
--- a/vendor/github.com/minio/minio-go/v6/api-object-lock.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2019 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/xml"
-	"fmt"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go/v6/pkg/s3utils"
-)
-
-// RetentionMode - object retention mode.
-type RetentionMode string
-
-const (
-	// Governance - governance mode.
-	Governance RetentionMode = "GOVERNANCE"
-
-	// Compliance - compliance mode.
-	Compliance RetentionMode = "COMPLIANCE"
-)
-
-func (r RetentionMode) String() string {
-	return string(r)
-}
-
-// IsValid - check whether this retention mode is valid or not.
-func (r RetentionMode) IsValid() bool {
-	return r == Governance || r == Compliance
-}
-
-// ValidityUnit - retention validity unit.
-type ValidityUnit string
-
-const (
-	// Days - denotes no. of days.
-	Days ValidityUnit = "DAYS"
-
-	// Years - denotes no. of years.
-	Years ValidityUnit = "YEARS"
-)
-
-func (unit ValidityUnit) String() string {
-	return string(unit)
-}
-
-// isValid - check whether this validity unit is valid or not.
-func (unit ValidityUnit) isValid() bool {
-	return unit == Days || unit == Years
-}
-
-// Retention - bucket level retention configuration.
-type Retention struct {
-	Mode     RetentionMode
-	Validity time.Duration
-}
-
-func (r Retention) String() string {
-	return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
-}
-
-// IsEmpty - returns whether retention is empty or not.
-func (r Retention) IsEmpty() bool { - return r.Mode == "" || r.Validity == 0 -} - -// objectLockConfig - object lock configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectLockConfig struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"ObjectLockConfiguration"` - ObjectLockEnabled string `xml:"ObjectLockEnabled"` - Rule *struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - } `xml:"Rule,omitempty"` -} - -func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { - config := &objectLockConfig{ - ObjectLockEnabled: "Enabled", - } - - if mode != nil && validity != nil && unit != nil { - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - - if !unit.isValid() { - return nil, fmt.Errorf("invalid validity unit `%v`", unit) - } - - config.Rule = &struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - }{} - - config.Rule.DefaultRetention.Mode = *mode - if *unit == Days { - config.Rule.DefaultRetention.Days = validity - } else { - config.Rule.DefaultRetention.Years = validity - } - - return config, nil - } - - if mode == nil && validity == nil && unit == nil { - return config, nil - } - - return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") -} - -// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c Client) SetBucketObjectLockConfig(bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - config, err := newObjectLockConfig(mode, validity, unit) - if err != nil { - return err - } - - configData, err := xml.Marshal(config) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(configData), - contentLength: int64(len(configData)), - contentMD5Base64: sumMD5Base64(configData), - contentSHA256Hex: sum256Hex(configData), - } - - // Execute PUT bucket object lock configuration. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// GetBucketObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetBucketObjectLockConfig(bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, nil, nil, err - } - - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return nil, nil, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") - } - } - config := &objectLockConfig{} - if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { - return nil, nil, nil, err - } - - if config.Rule != nil { - mode = &config.Rule.DefaultRetention.Mode - if config.Rule.DefaultRetention.Days != nil { - validity = config.Rule.DefaultRetention.Days - days := Days - unit = &days - } else { - validity = config.Rule.DefaultRetention.Years - years := Years - unit = &years - } - - return mode, validity, unit, nil - } - - return nil, nil, nil, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-object-retention.go b/vendor/github.com/minio/minio-go/v6/api-object-retention.go deleted file mode 100644 index 8aa08f073..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-object-retention.go +++ /dev/null @@ -1,168 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// objectRetention - object retention specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectRetention struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"Retention"` - Mode RetentionMode `xml:"Mode"` - RetainUntilDate time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate"` -} - -func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { - if mode == nil { - return nil, fmt.Errorf("Mode not set") - } - - if date == nil { - return nil, fmt.Errorf("RetainUntilDate not set") - } - - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - objectRetention := &objectRetention{ - Mode: *mode, - RetainUntilDate: *date, - } - return objectRetention, nil -} - -// PutObjectRetentionOptions represents options specified by user for PutObject call -type PutObjectRetentionOptions struct { - GovernanceBypass bool - Mode *RetentionMode - RetainUntilDate *time.Time - VersionID string -} - -// PutObjectRetention : sets object retention for a given object and versionID. -func (c Client) PutObjectRetention(bucketName, objectName string, opts PutObjectRetentionOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
-	urlValues := make(url.Values)
-	urlValues.Set("retention", "")
-
-	if opts.VersionID != "" {
-		urlValues.Set("versionId", opts.VersionID)
-	}
-
-	retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
-	if err != nil {
-		return err
-	}
-
-	retentionData, err := xml.Marshal(retention)
-	if err != nil {
-		return err
-	}
-
-	// Build headers.
-	headers := make(http.Header)
-
-	if opts.GovernanceBypass {
-		// Set the bypass governance retention header
-		headers.Set("x-amz-bypass-governance-retention", "True")
-	}
-
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentBody:      bytes.NewReader(retentionData),
-		contentLength:    int64(len(retentionData)),
-		contentMD5Base64: sumMD5Base64(retentionData),
-		contentSHA256Hex: sum256Hex(retentionData),
-		customHeader:     headers,
-	}
-
-	// Execute PUT Object Retention.
-	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	return nil
-}
-
-// GetObjectRetention gets retention of given object.
-func (c Client) GetObjectRetention(bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, nil, err
-	}
-
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, nil, err
-	}
-	urlValues := make(url.Values)
-	urlValues.Set("retention", "")
-	if versionID != "" {
-		urlValues.Set("versionId", versionID)
-	}
-	// Execute GET on the object to read its retention.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return nil, nil, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	retention := &objectRetention{}
-	if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
-		return nil, nil, err
-	}
-
-	return &retention.Mode, &retention.RetainUntilDate, nil
-}
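A combined sketch of the object-lock and retention APIs above (illustrative only; assumes `client`, a bucket created with object locking enabled, and imports "fmt", "log", "time"):

    mode := minio.Governance
    validity := uint(30)
    unit := minio.Days
    // Default retention: GOVERNANCE for 30 days on newly written objects.
    if err := client.SetBucketObjectLockConfig("mylockedbucket", &mode, &validity, &unit); err != nil {
        log.Fatalln(err)
    }

    // Pin one object explicitly for 24 hours.
    until := time.Now().UTC().Add(24 * time.Hour)
    err := client.PutObjectRetention("mylockedbucket", "myobject", minio.PutObjectRetentionOptions{
        Mode:            &mode,
        RetainUntilDate: &until,
    })
    if err != nil {
        log.Fatalln(err)
    }

    gotMode, retainUntil, err := client.GetObjectRetention("mylockedbucket", "myobject", "")
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Println(gotMode, retainUntil)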
diff --git a/vendor/github.com/minio/minio-go/v6/api-presigned.go b/vendor/github.com/minio/minio-go/v6/api-presigned.go
deleted file mode 100644
index e2d68b0ec..000000000
--- a/vendor/github.com/minio/minio-go/v6/api-presigned.go
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"errors"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go/v6/pkg/s3signer"
-	"github.com/minio/minio-go/v6/pkg/s3utils"
-)
-
-// presignURL - Returns a presigned URL for an input 'method'.
-// The maximum expiry is 7 days (604800 seconds) and the minimum is 1 second.
-func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	// Input validation.
-	if method == "" {
-		return nil, ErrInvalidArgument("method cannot be empty.")
-	}
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
-	}
-	if err = isValidExpiry(expires); err != nil {
-		return nil, err
-	}
-
-	// Convert expires into seconds.
-	expireSeconds := int64(expires / time.Second)
-	reqMetadata := requestMetadata{
-		presignURL:  true,
-		bucketName:  bucketName,
-		objectName:  objectName,
-		expires:     expireSeconds,
-		queryValues: reqParams,
-	}
-
-	// Instantiate a new request.
-	// Since expires is set newRequest will presign the request.
-	var req *http.Request
-	if req, err = c.newRequest(method, reqMetadata); err != nil {
-		return nil, err
-	}
-	return req.URL, nil
-}
-
-// PresignedGetObject - Returns a presigned URL to access object
-// data without credentials. The URL can have a maximum expiry of
-// up to 7 days or a minimum of 1 second. Additionally you can override
-// a set of response headers using the query parameters.
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
-}
-
-// PresignedHeadObject - Returns a presigned URL to access object
-// metadata without credentials. The URL can have a maximum expiry of
-// up to 7 days or a minimum of 1 second. Additionally you can override
-// a set of response headers using the query parameters.
-func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
-}
-
-// PresignedPutObject - Returns a presigned URL to upload an object
-// without credentials. The URL can have a maximum expiry of up to 7 days
-// or a minimum of 1 second.
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("PUT", bucketName, objectName, expires, nil)
-}
-
-// Presign - returns a presigned URL for any http method of your choice
-// along with custom request params. The URL can have a maximum expiry of
-// up to 7 days or a minimum of 1 second.
-func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	return c.presignURL(method, bucketName, objectName, expires, reqParams)
-}
-
-// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
-func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
-	// Validate input arguments.
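// A sketch of generating a presigned download URL with PresignedGetObject
// above (illustrative only; assumes `client` and imports "fmt", "log",
// "net/url", "time"). The URL is valid for 15 minutes and overrides the
// response Content-Disposition:
//
//	reqParams := make(url.Values)
//	reqParams.Set("response-content-disposition", `attachment; filename="picture.jpg"`)
//	u, err := client.PresignedGetObject("mybucket", "myobject", 15*time.Minute, reqParams)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println(u)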
- if p.expiration.IsZero() { - return nil, nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, nil, errors.New("bucket name must be specified") - } - - bucketName := p.formData["bucket"] - // Fetch the bucket location. - location, err := c.getBucketLocation(bucketName) - if err != nil { - return nil, nil, err - } - - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) - - u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) - if err != nil { - return nil, nil, err - } - - // Get credentials from the configured credentials provider. - credValues, err := c.credsProvider.Get() - if err != nil { - return nil, nil, err - } - - var ( - signerType = credValues.SignerType - sessionToken = credValues.SessionToken - accessKeyID = credValues.AccessKeyID - secretAccessKey = credValues.SecretAccessKey - ) - - if signerType.IsAnonymous() { - return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") - } - - // Keep time. - t := time.Now().UTC() - // For signature version '2' handle here. - if signerType.IsV2() { - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - // For Google endpoint set this value to be 'GoogleAccessId'. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - p.formData["GoogleAccessId"] = accessKeyID - } else { - // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = accessKeyID - } - // Sign the policy. - p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) - return u, p.formData, nil - } - - // Add date policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-date", - value: t.Format(iso8601DateFormat), - }); err != nil { - return nil, nil, err - } - - // Add algorithm policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-algorithm", - value: signV4Algorithm, - }); err != nil { - return nil, nil, err - } - - // Add a credential policy. - credential := s3signer.GetCredential(accessKeyID, location, t) - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-credential", - value: credential, - }); err != nil { - return nil, nil, err - } - - if sessionToken != "" { - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-security-token", - value: sessionToken, - }); err != nil { - return nil, nil, err - } - } - - // Get base64 encoded policy. - policyBase64 := p.base64() - - // Fill in the form data. - p.formData["policy"] = policyBase64 - p.formData["x-amz-algorithm"] = signV4Algorithm - p.formData["x-amz-credential"] = credential - p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - if sessionToken != "" { - p.formData["x-amz-security-token"] = sessionToken - } - p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) - return u, p.formData, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-bucket.go b/vendor/github.com/minio/minio-go/v6/api-put-bucket.go deleted file mode 100644 index 0041ce179..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-bucket.go +++ /dev/null @@ -1,383 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. 
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/xml"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/minio/minio-go/v6/pkg/s3utils"
-)
-
-/// Bucket operations
-
-func (c Client) makeBucket(bucketName string, location string, objectLockEnabled bool) (err error) {
-	defer func() {
-		// Save the location into cache on a successful makeBucket response.
-		if err == nil {
-			c.bucketLocCache.Set(bucketName, location)
-		}
-	}()
-
-	// Validate the input arguments.
-	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
-		return err
-	}
-
-	// If location is empty, treat it as the default region 'us-east-1'.
-	if location == "" {
-		location = "us-east-1"
-		// For custom-region clients, default to the
-		// custom region instead of 'us-east-1'.
-		if c.region != "" {
-			location = c.region
-		}
-	}
-	// PUT bucket request metadata.
-	reqMetadata := requestMetadata{
-		bucketName:     bucketName,
-		bucketLocation: location,
-	}
-
-	if objectLockEnabled {
-		headers := make(http.Header)
-		headers.Add("x-amz-bucket-object-lock-enabled", "true")
-		reqMetadata.customHeader = headers
-	}
-
-	// If location is not 'us-east-1' create bucket location config.
-	if location != "us-east-1" && location != "" {
-		createBucketConfig := createBucketConfiguration{}
-		createBucketConfig.Location = location
-		var createBucketConfigBytes []byte
-		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
-		if err != nil {
-			return err
-		}
-		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
-		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
-		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
-		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
-	}
-
-	// Execute PUT to create a new bucket.
-	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Success.
-	return nil
-}
-
-// MakeBucket creates a new bucket with bucketName.
-//
-// Location is an optional argument; by default all buckets are
-// created in the US Standard Region.
-//
-// For regions supported by Amazon S3, see http://docs.aws.amazon.com/general/latest/gr/rande.html
-// For regions supported by Google Cloud Storage, see https://cloud.google.com/storage/docs/bucket-locations
-func (c Client) MakeBucket(bucketName string, location string) (err error) {
-	return c.makeBucket(bucketName, location, false)
-}
-
-// MakeBucketWithObjectLock creates a new bucket named bucketName with object lock enabled.
-//
-// Location is an optional argument; by default all buckets are
-// created in the US Standard Region.
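A minimal sketch of the bucket-creation calls above (client as before; the region string may be left empty to accept the default):

	if err := client.MakeBucket("mybucket", "us-east-1"); err != nil {
		log.Fatalln(err)
	}
	// Or create the bucket with object locking enabled from the start:
	if err := client.MakeBucketWithObjectLock("locked-bucket", "us-east-1"); err != nil {
		log.Fatalln(err)
	}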
-// -// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html -// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucketWithObjectLock(bucketName string, location string) (err error) { - return c.makeBucket(bucketName, location, true) -} - -// SetBucketPolicy set the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If policy is empty then delete the bucket policy. - if policy == "" { - return c.removeBucketPolicy(bucketName) - } - - // Save the updated policies. - return c.putBucketPolicy(bucketName, policy) -} - -// Saves a new bucket policy. -func (c Client) putBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Content-length is mandatory for put policy request - policyReader := strings.NewReader(policy) - b, err := ioutil.ReadAll(policyReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: policyReader, - contentLength: int64(len(b)), - } - - // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Removes all policies on a bucket. -func (c Client) removeBucketPolicy(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If lifecycle is empty then delete it. - if lifecycle == "" { - return c.removeBucketLifecycle(bucketName) - } - - // Save the updated lifecycle. - return c.putBucketLifecycle(bucketName, lifecycle) -} - -// Saves a new bucket lifecycle. -func (c Client) putBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
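SetBucketPolicy above takes the policy as a raw JSON document; a sketch with an illustrative public-read policy (any valid S3 policy JSON works):

	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect":    "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action":    ["s3:GetObject"],
	    "Resource":  ["arn:aws:s3:::mybucket/*"]
	  }]
	}`
	if err := client.SetBucketPolicy("mybucket", policy); err != nil {
		log.Fatalln(err)
	}
	// Passing "" instead deletes the bucket policy, per SetBucketPolicy above.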
- urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Content-length is mandatory for put lifecycle request - lifecycleReader := strings.NewReader(lifecycle) - b, err := ioutil.ReadAll(lifecycleReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: lifecycleReader, - contentLength: int64(len(b)), - contentMD5Base64: sumMD5Base64(b), - } - - // Execute PUT to upload a new bucket lifecycle. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Remove lifecycle from a bucket. -func (c Client) removeBucketLifecycle(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// SetBucketNotification saves a new bucket notification. -func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("notification", "") - - notifBytes, err := xml.Marshal(bucketNotification) - if err != nil { - return err - } - - notifBuffer := bytes.NewReader(notifBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Base64: sumMD5Base64(notifBytes), - contentSHA256Hex: sum256Hex(notifBytes), - } - - // Execute PUT to upload a new bucket notification. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(bucketName string) error { - return c.SetBucketNotification(bucketName, BucketNotification{}) -} - -var ( - versionEnableConfig = []byte("Enabled") - versionEnableConfigLen = int64(len(versionEnableConfig)) - versionEnableConfigMD5Sum = sumMD5Base64(versionEnableConfig) - versionEnableConfigSHA256 = sum256Hex(versionEnableConfig) - - versionDisableConfig = []byte("Suspended") - versionDisableConfigLen = int64(len(versionDisableConfig)) - versionDisableConfigMD5Sum = sumMD5Base64(versionDisableConfig) - versionDisableConfigSHA256 = sum256Hex(versionDisableConfig) -) - -func (c Client) setVersioning(bucketName string, config []byte, length int64, md5sum, sha256sum string) error { - // Input validation. 
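SetBucketLifecycle likewise accepts the raw XML configuration; a sketch with an illustrative seven-day expiry rule:

	lifecycle := `<LifecycleConfiguration>
	  <Rule>
	    <ID>expire-tmp</ID>
	    <Prefix>tmp/</Prefix>
	    <Status>Enabled</Status>
	    <Expiration><Days>7</Days></Expiration>
	  </Rule>
	</LifecycleConfiguration>`
	if err := client.SetBucketLifecycle("mybucket", lifecycle); err != nil {
		log.Fatalln(err)
	}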
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("versioning", "") - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(config), - contentLength: length, - contentMD5Base64: md5sum, - contentSHA256Hex: sha256sum, - } - - // Execute PUT to set a bucket versioning. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// EnableVersioning - Enable object versioning in given bucket. -func (c Client) EnableVersioning(bucketName string) error { - return c.setVersioning(bucketName, versionEnableConfig, versionEnableConfigLen, versionEnableConfigMD5Sum, versionEnableConfigSHA256) -} - -// DisableVersioning - Disable object versioning in given bucket. -func (c Client) DisableVersioning(bucketName string) error { - return c.setVersioning(bucketName, versionDisableConfig, versionDisableConfigLen, versionDisableConfigMD5Sum, versionDisableConfigSHA256) -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-common.go b/vendor/github.com/minio/minio-go/v6/api-put-object-common.go deleted file mode 100644 index a786d2a8e..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-common.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "math" - "os" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// Verify if reader is *minio.Object -func isObject(reader io.Reader) (ok bool) { - _, ok = reader.(*Object) - return -} - -// Verify if reader is a generic ReaderAt -func isReadAt(reader io.Reader) (ok bool) { - var v *os.File - v, ok = reader.(*os.File) - if ok { - // Stdin, Stdout and Stderr all have *os.File type - // which happen to also be io.ReaderAt compatible - // we need to add special conditions for them to - // be ignored by this function. - for _, f := range []string{ - "/dev/stdin", - "/dev/stdout", - "/dev/stderr", - } { - if f == v.Name() { - ok = false - break - } - } - } else { - _, ok = reader.(io.ReaderAt) - } - return -} - -// optimalPartInfo - calculate the optimal part info for a given -// object size. -// -// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible -// object storage it will have the following parameters as constants. 
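The versioning toggles above each reduce to a single PUT of a canned XML body, so using them is a one-liner:

	if err := client.EnableVersioning("mybucket"); err != nil {
		log.Fatalln(err)
	}
	// Suspend later if needed; existing object versions are retained.
	if err := client.DisableVersioning("mybucket"); err != nil {
		log.Fatalln(err)
	}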
-// -// maxPartsCount - 10000 -// minPartSize - 128MiB -// maxMultipartPutObjectSize - 5TiB -// -func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { - // object size is '-1' set it to 5TiB. - if objectSize == -1 { - objectSize = maxMultipartPutObjectSize - } - - // object size is larger than supported maximum. - if objectSize > maxMultipartPutObjectSize { - err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") - return - } - - var partSizeFlt float64 - if configuredPartSize > 0 { - if int64(configuredPartSize) > objectSize { - err = ErrEntityTooLarge(int64(configuredPartSize), objectSize, "", "") - return - } - - if objectSize > (int64(configuredPartSize) * maxPartsCount) { - err = ErrInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") - return - } - - if configuredPartSize < absMinPartSize { - err = ErrInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") - return - } - - if configuredPartSize > maxPartSize { - err = ErrInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") - return - } - partSizeFlt = float64(configuredPartSize) - } else { - configuredPartSize = minPartSize - // Use floats for part size for all calculations to avoid - // overflows during float64 to int64 conversions. - partSizeFlt = float64(objectSize / maxPartsCount) - partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) - } - - // Total parts count. - totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) - // Part size. - partSize = int64(partSizeFlt) - // Last part size. - lastPartSize = objectSize - int64(totalPartsCount-1)*partSize - return totalPartsCount, partSize, lastPartSize, nil -} - -// getUploadID - fetch upload id if already present for an object name -// or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return "", err - } - - // Initiate multipart upload for an object. - initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) - if err != nil { - return "", err - } - return initMultipartUploadResult.UploadID, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-context.go b/vendor/github.com/minio/minio-go/v6/api-put-object-context.go deleted file mode 100644 index 415a7878f..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-context.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
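To make the part-size arithmetic in optimalPartInfo concrete: an unknown-size upload assumes the 5 TiB maximum, so the raw quotient 5 TiB / 10000 parts ≈ 524.3 MiB is rounded up to the next multiple of minPartSize (128 MiB), yielding 640 MiB parts and exactly 8192 parts. A standalone sketch of the same computation (imports: fmt, math):

	const (
		maxParts    = 10000
		minPartSize = int64(128) << 20 // 128 MiB
		maxSize     = int64(5) << 40   // 5 TiB
	)
	raw := float64(maxSize / maxParts)                                   // ≈ 524.3 MiB
	partSize := int64(math.Ceil(raw/float64(minPartSize))) * minPartSize // 640 MiB
	parts := int(math.Ceil(float64(maxSize) / float64(partSize)))        // 8192
	fmt.Println(parts, partSize)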
- */ - -package minio - -import ( - "context" - "io" -) - -// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. -func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - err = opts.validate() - if err != nil { - return 0, err - } - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go b/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go deleted file mode 100644 index 19e58add8..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "io/ioutil" - "net/http" - - "github.com/minio/minio-go/v6/pkg/encrypt" -) - -// CopyObject - copy a source object into a new object -func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { - return c.CopyObjectWithProgress(dst, src, nil) -} - -// CopyObjectWithProgress - copy a source object into a new object, optionally takes -// progress bar input to notify current progress. -func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error { - header := make(http.Header) - for k, v := range src.Headers { - header[k] = v - } - - var err error - var size int64 - // If progress bar is specified, size should be requested as well initiate a StatObject request. - if progress != nil { - size, _, _, err = src.getProps(c) - if err != nil { - return err - } - } - - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(header) - } - - if dst.encryption != nil { - dst.encryption.Marshal(header) - } - for k, v := range dst.getUserMetaHeadersMap(true) { - header.Set(k, v) - } - - resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ - bucketName: dst.bucket, - objectName: dst.object, - customHeader: header, - }) - if err != nil { - return err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, dst.bucket, dst.object) - } - - // Update the progress properly after successful copy. - if progress != nil { - io.CopyN(ioutil.Discard, progress, size) - } - - return nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go deleted file mode 100644 index fb22c0d64..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. 
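A sketch of a server-side copy via CopyObject above; NewSourceInfo and NewDestinationInfo are this package's usual constructors for SourceInfo/DestinationInfo but do not appear in this hunk, so their exact signatures are assumed here:

	src := minio.NewSourceInfo("src-bucket", "report.pdf", nil) // nil: no SSE-C on the source
	dst, err := minio.NewDestinationInfo("dst-bucket", "report-copy.pdf", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	if err := client.CopyObject(dst, src); err != nil {
		log.Fatalln(err)
	}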
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return 0, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return 0, err - } - - // Save the file size. - fileSize := fileStat.Size() - - // Set contentType based on filepath extension if not given or default - // value of "application/octet-stream" if the extension has no associated type. - if opts.ContentType == "" { - if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { - opts.ContentType = "application/octet-stream" - } - } - return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-file.go b/vendor/github.com/minio/minio-go/v6/api-put-object-file.go deleted file mode 100644 index 23df6cd1c..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-file.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
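FPutObjectWithContext above can be bounded with a deadline; a sketch (client as before; paths and names are placeholders):

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Content-Type is inferred from the ".pdf" extension when left unset,
	// falling back to application/octet-stream.
	n, err := client.FPutObjectWithContext(ctx, "mybucket", "report.pdf", "/tmp/report.pdf", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)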
- */ - -package minio - -import ( - "context" -) - -// FPutObject - Create an object in a bucket, with contents from file at filePath -func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { - return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go deleted file mode 100644 index ab284f986..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "runtime/debug" - "sort" - "strconv" - "strings" - - "github.com/minio/minio-go/v6/pkg/encrypt" - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions) (n int64, err error) { - n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - } - return n, err -} - -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) - if err != nil { - return 0, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. 
- partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - defer debug.FreeOSMemory() - - for partNumber <= totalPartsCount { - // Choose hash algorithms to be calculated by hashCopyN, - // avoid sha256 with non-v4 signature request or - // HTTPS connection. - hashAlgos, hashSums := c.hashMaterials() - - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF { - break - } - if rErr != nil && rErr != io.ErrUnexpectedEOF { - return 0, rErr - } - - // Calculates hash sums while copying partSize bytes into cw. - for k, v := range hashAlgos { - v.Write(buf[:length]) - hashSums[k] = v.Sum(nil) - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Checksums.. - var ( - md5Base64 string - sha256Hex string - ) - if hashSums["md5"] != nil { - md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) - } - if hashSums["sha256"] != nil { - sha256Hex = hex.EncodeToString(hashSums["sha256"]) - } - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} - -// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return initiateMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return initiateMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploads", "") - - // Set ContentType header. - customHeader := opts.Header() - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - } - - // Execute POST on an objectName to initiate multipart upload. 
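The io.ReadFull loop above relies on a subtle contract: a zero-byte read reports io.EOF (nothing left, stop before uploading), while a short final read reports io.ErrUnexpectedEOF (a valid, shorter last part). A self-contained sketch of that pattern, with a hypothetical upload callback:

	func uploadInParts(r io.Reader, partSize int, upload func([]byte) error) error {
		buf := make([]byte, partSize) // one reusable part-sized buffer
		for {
			n, err := io.ReadFull(r, buf)
			if err == io.EOF {
				return nil // source ended exactly on a part boundary
			}
			if err != nil && err != io.ErrUnexpectedEOF {
				return err // genuine I/O failure
			}
			if upErr := upload(buf[:n]); upErr != nil {
				return upErr
			}
			if err == io.ErrUnexpectedEOF {
				return nil // short read: that was the final part
			}
		}
	}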
- resp, err := c.executeMethod(ctx, "POST", reqMetadata) - defer closeResponse(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode xml for new multipart upload. - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -// uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectPart{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectPart{}, err - } - if size > maxPartSize { - return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) - } - if size <= -1 { - return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) - } - if partNumber <= 0 { - return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") - } - if uploadID == "" { - return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") - } - - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - // Set upload id. - urlValues.Set("uploadId", uploadID) - - // Set encryption headers, if any. - customHeader := make(http.Header) - // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html - // Server-side encryption is supported by the S3 Multipart Upload actions. - // Unless you are using a customer-provided encryption key, you don't need - // to specify the encryption parameters in each UploadPart request. - if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - - // Execute PUT on each part. - resp, err := c.executeMethod(ctx, "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectPart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Once successfully uploaded, return completed part. - objPart := ObjectPart{} - objPart.Size = size - objPart.PartNumber = partNumber - // Trim off the odd double quotes from ETag in the beginning and end. - objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") - return objPart, nil -} - -// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload) (completeMultipartUploadResult, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return completeMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return completeMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. - completeMultipartUploadBytes, err := xml.Marshal(complete) - if err != nil { - return completeMultipartUploadResult{}, err - } - - // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), - } - - // Execute POST to complete multipart upload for an objectName. - resp, err := c.executeMethod(ctx, "POST", reqMetadata) - defer closeResponse(resp) - if err != nil { - return completeMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Read resp.Body into a []bytes to parse for Error response inside the body - var b []byte - b, err = ioutil.ReadAll(resp.Body) - if err != nil { - return completeMultipartUploadResult{}, err - } - // Decode completed multipart upload response on success. - completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err - } else if completeMultipartUploadResult.Bucket == "" { - // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. - // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values - // of the members. - - // Decode completed multipart upload response on failure - completeMultipartUploadErr := ErrorResponse{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err - } - return completeMultipartUploadResult, completeMultipartUploadErr - } - return completeMultipartUploadResult, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go deleted file mode 100644 index 0d3f2455a..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go +++ /dev/null @@ -1,417 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
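The double decode at the end of completeMultipartUpload guards against servers that return HTTP 200 with an XML <Error> document in the body. A condensed sketch of that idiom using the same package types (encoding/xml; ErrorResponse satisfies the error interface):

	func decodeCompleteBody(b []byte) (completeMultipartUploadResult, error) {
		var result completeMultipartUploadResult
		if err := xml.Unmarshal(b, &result); err != nil {
			return result, err // ill-formed XML
		}
		if result.Bucket != "" {
			return result, nil // a genuine success document
		}
		// Well-formed XML of some other shape: decode it as an error document.
		var apiErr ErrorResponse
		if err := xml.Unmarshal(b, &apiErr); err != nil {
			return result, err
		}
		return result, apiErr
	}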
- */ - -package minio - -import ( - "context" - "fmt" - "io" - "net/http" - "sort" - "strings" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// putObjectMultipartStream - upload a large object using -// multipart upload and streaming signature for signing payload. -// Comprehensive put object operation involving multipart uploads. -// -// Following code handles these types of readers. -// -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -// -func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - - if !isObject(reader) && isReadAt(reader) { - // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. - n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) - } else { - n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - } - return n, err -} - -// uploadedPartRes - the response received from a part upload. -type uploadedPartRes struct { - Error error // Any error encountered while uploading the part. - PartNum int // Number of the part uploaded. - Size int64 // Size of the part uploaded. - Part *ObjectPart -} - -type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part *ObjectPart // Size of the part uploaded. -} - -// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. -// Supports all readers which implements io.ReaderAt interface -// (ReadAt method). -// -// NOTE: This function is meant to be used for all readers which -// implement io.ReaderAt which allows us for resuming multipart -// uploads but reading at an offset, which would avoid re-read the -// data which was already uploaded. Internally this function uses -// temporary files for staging all the data, these temporary files are -// cleaned automatically when the caller i.e http client closes the -// stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) - if err != nil { - return 0, err - } - - // Initiate a new multipart upload. 
- uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - // Aborts the multipart upload in progress, if the - // function returns any error, since we do not resume - // we should purge the parts which have been uploaded - // to relinquish storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Declare a channel that sends the next part number to be uploaded. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadPartsCh := make(chan uploadPartReq, 10000) - - // Declare a channel that sends back the response of a part upload. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadedPartsCh := make(chan uploadedPartRes, 10000) - - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - - // Send each part number to the channel to be processed. - for p := 1; p <= totalPartsCount; p++ { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} - } - close(uploadPartsCh) - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= opts.getNumThreads(); w++ { - go func(partSize int64) { - // Each worker will draw from the part channel and upload in parallel. - for uploadReq := range uploadPartsCh { - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(uploadReq.PartNum-1) * partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) - partSize = lastPartSize - } - - // Get a section reader on a particular offset. - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, - sectionReader, uploadReq.PartNum, - "", "", partSize, opts.ServerSideEncryption) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Size: 0, - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart - - // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: objPart.Size, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - Error: nil, - } - } - }(partSize) - } - - // Gather the responses as they occur and update any - // progress bar. - for u := 1; u <= totalPartsCount; u++ { - uploadRes := <-uploadedPartsCh - if uploadRes.Error != nil { - return totalUploadedSize, uploadRes.Error - } - // Retrieve each uploaded part and store it to be completed. - // part, ok := partsInfo[uploadRes.PartNum] - part := uploadRes.Part - if part == nil { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) - } - // Update the totalUploadedSize. - totalUploadedSize += uploadRes.Size - // Store the parts to be completed in order. 
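The upload fan-out above is a standard bounded worker pool; stripped of the S3 details it looks like this (uploadOnePart is a hypothetical stand-in for c.uploadPart):

	func uploadAllParts(totalParts, workers int, uploadOnePart func(part int) error) error {
		jobs := make(chan int, totalParts)      // buffered so senders never block
		results := make(chan error, totalParts) // buffered so workers never leak
		for w := 0; w < workers; w++ {
			go func() {
				for part := range jobs {
					results <- uploadOnePart(part)
				}
			}()
		}
		for p := 1; p <= totalParts; p++ {
			jobs <- p
		}
		close(jobs)
		for p := 0; p < totalParts; p++ {
			if err := <-results; err != nil {
				return err // buffering lets the remaining workers drain and exit
			}
		}
		return nil
	}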
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Verify if we uploaded all the data. - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} - -func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) - if err != nil { - return 0, err - } - // Initiates a new multipart request - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - // Aborts the multipart upload if the function returns - // any error, since we do not resume we should purge - // the parts which have been uploaded to relinquish - // storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Part number always starts with '1'. - var partNumber int - for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader := newHook(reader, opts.Progress) - - // Proceed to upload the part. - if partNumber == totalPartsCount { - partSize = lastPartSize - } - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, - io.LimitReader(hookReader, partSize), - partNumber, "", "", partSize, opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += partSize - } - - // Verify if we uploaded all the data. - if size > 0 { - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - } - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. 
- return totalUploadedSize, nil -} - -// putObjectNoChecksum special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Size -1 is only supported on Google Cloud Storage, we error - // out in all other situations. - if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { - return 0, ErrEntityTooSmall(size, bucketName, objectName) - } - if size > 0 { - if isReadAt(reader) && !isObject(reader) { - seeker, _ := reader.(io.Seeker) - offset, err := seeker.Seek(0, io.SeekCurrent) - if err != nil { - return 0, ErrInvalidArgument(err.Error()) - } - reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) - } - } - - // Update progress reader appropriately to the latest offset as we - // read from the source. - readSeeker := newHook(reader, opts.Progress) - - // This function does not calculate sha256 and md5sum for payload. - // Execute put object. - st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil -} - -// putObjectDo - executes the put object http operation. -// NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - // Set headers. - customHeader := opts.Header() - - // Populate request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - - // Execute PUT an objectName. - resp, err := c.executeMethod(ctx, "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - var objInfo ObjectInfo - // Trim off the odd double quotes from ETag in the beginning and end. - objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") - // A success here means data was written to server successfully. - objInfo.Size = size - - // Return here. - return objInfo, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object.go b/vendor/github.com/minio/minio-go/v6/api-put-object.go deleted file mode 100644 index bc0848bbf..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-put-object.go +++ /dev/null @@ -1,290 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "runtime/debug" - "sort" - "time" - - "github.com/minio/minio-go/v6/pkg/encrypt" - "github.com/minio/minio-go/v6/pkg/s3utils" - "golang.org/x/net/http/httpguts" -) - -// PutObjectOptions represents options specified by user for PutObject call -type PutObjectOptions struct { - UserMetadata map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - ContentLanguage string - CacheControl string - Mode *RetentionMode - RetainUntilDate *time.Time - ServerSideEncryption encrypt.ServerSide - NumThreads uint - StorageClass string - WebsiteRedirectLocation string - PartSize uint64 -} - -// getNumThreads - gets the number of threads to be used in the multipart -// put object operation -func (opts PutObjectOptions) getNumThreads() (numThreads int) { - if opts.NumThreads > 0 { - numThreads = int(opts.NumThreads) - } else { - numThreads = totalWorkers - } - return -} - -// Header - constructs the headers from metadata entered by user in -// PutObjectOptions struct -func (opts PutObjectOptions) Header() (header http.Header) { - header = make(http.Header) - - if opts.ContentType != "" { - header["Content-Type"] = []string{opts.ContentType} - } else { - header["Content-Type"] = []string{"application/octet-stream"} - } - if opts.ContentEncoding != "" { - header["Content-Encoding"] = []string{opts.ContentEncoding} - } - if opts.ContentDisposition != "" { - header["Content-Disposition"] = []string{opts.ContentDisposition} - } - if opts.ContentLanguage != "" { - header["Content-Language"] = []string{opts.ContentLanguage} - } - if opts.CacheControl != "" { - header["Cache-Control"] = []string{opts.CacheControl} - } - - if opts.Mode != nil { - header["x-amz-object-lock-mode"] = []string{opts.Mode.String()} - } - if opts.RetainUntilDate != nil { - header["x-amz-object-lock-retain-until-date"] = []string{opts.RetainUntilDate.Format(time.RFC3339)} - } - - if opts.ServerSideEncryption != nil { - opts.ServerSideEncryption.Marshal(header) - } - if opts.StorageClass != "" { - header[amzStorageClass] = []string{opts.StorageClass} - } - if opts.WebsiteRedirectLocation != "" { - header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} - } - for k, v := range opts.UserMetadata { - if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { - header["X-Amz-Meta-"+k] = []string{v} - } else { - header[k] = []string{v} - } - } - return -} - -// validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
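PutObjectOptions from this hunk maps almost one-to-one onto request headers via Header(); a usage sketch (reader and size are placeholders):

	opts := minio.PutObjectOptions{
		ContentType:  "application/json",
		CacheControl: "max-age=600",
		UserMetadata: map[string]string{"reviewed": "true"}, // sent as X-Amz-Meta-Reviewed
	}
	n, err := client.PutObject("mybucket", "config.json", reader, size, opts)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("wrote %d bytes", n)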
-func (opts PutObjectOptions) validate() (err error) { - for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { - return ErrInvalidArgument(k + " unsupported user defined metadata name") - } - if !httpguts.ValidHeaderFieldValue(v) { - return ErrInvalidArgument(v + " unsupported user defined metadata value") - } - } - if opts.Mode != nil { - if !opts.Mode.IsValid() { - return ErrInvalidArgument(opts.Mode.String() + " unsupported retention mode") - } - } - return nil -} - -// completedParts is a collection of parts sortable by their part numbers. -// used for sorting the uploaded parts before completing the multipart request. -type completedParts []CompletePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// PutObject creates an object in a bucket. -// -// You must have WRITE permissions on a bucket to create an object. -// -// - For size smaller than 128MiB PutObject automatically does a -// single atomic Put operation. -// - For size larger than 128MiB PutObject automatically does a -// multipart Put operation. -// - For size input as -1 PutObject does a multipart Put operation -// until input stream reaches EOF. Maximum object size that can -// be uploaded through this operation will be 5TiB. -func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) -} - -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Check for largest object size allowed. - if size > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // NOTE: Streaming signature is not supported by GCS. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - - partSize := opts.PartSize - if opts.PartSize == 0 { - partSize = minPartSize - } - - if c.overrideSignerType.IsV2() { - if size >= 0 && size < int64(partSize) { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) - } - if size < 0 { - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) - } - - if size < int64(partSize) { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - - // For all sizes greater than 128MiB do multipart. - return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) -} - -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. 
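Per the PutObject contract above, passing a size of -1 selects the multipart "no length" path, which reads part-sized chunks until EOF; with default sizing that is 640 MiB per part for the 5 TiB ceiling (see optimalPartInfo earlier). A sketch streaming stdin (import os):

	n, err := client.PutObject("mybucket", "stream.log", os.Stdin, -1, minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("streamed %d bytes", n)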
- var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) - if err != nil { - return 0, err - } - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - defer debug.FreeOSMemory() - - for partNumber <= totalPartsCount { - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF && partNumber > 1 { - break - } - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return 0, rErr - } - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - "", "", int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-remove.go b/vendor/github.com/minio/minio-go/v6/api-remove.go deleted file mode 100644 index 4c8c335c3..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-remove.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io" - "net/http" - "net/url" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// RemoveBucket deletes the bucket name. 
-//
-// All objects (including all object versions and delete markers)
-// in the bucket must be deleted before successfully attempting this request.
-func (c Client) RemoveBucket(bucketName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	// Execute DELETE on bucket.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Remove the location from cache on a successful delete.
-	c.bucketLocCache.Delete(bucketName)
-
-	return nil
-}
-
-// RemoveObject removes an object from a bucket.
-func (c Client) RemoveObject(bucketName, objectName string) error {
-	return c.RemoveObjectWithOptions(bucketName, objectName, RemoveObjectOptions{})
-}
-
-// RemoveObjectOptions represents options specified by user for the RemoveObject call
-type RemoveObjectOptions struct {
-	GovernanceBypass bool
-	VersionID        string
-}
-
-// RemoveObjectWithOptions removes an object from a bucket.
-func (c Client) RemoveObjectWithOptions(bucketName, objectName string, opts RemoveObjectOptions) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-
-	if opts.VersionID != "" {
-		urlValues.Set("versionId", opts.VersionID)
-	}
-
-	// Build headers.
-	headers := make(http.Header)
-
-	if opts.GovernanceBypass {
-		// Set the bypass governance retention header
-		headers.Set("x-amz-bypass-governance-retention", "True")
-	}
-	// Execute DELETE on objectName.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		contentSHA256Hex: emptySHA256Hex,
-		queryValues:      urlValues,
-		customHeader:     headers,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		// if some unexpected error happened and max retry is reached, we want to let client know
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// DeleteObject always responds with http '204' even for
-	// objects which do not exist. So no need to handle them
-	// specifically.
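// A minimal usage sketch (hypothetical bucket and object names) for the
// options-based removal above, bypassing governance-mode retention:
//
//	err := clnt.RemoveObjectWithOptions("my-bucket", "locked.bin",
//		minio.RemoveObjectOptions{GovernanceBypass: true})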
-	return nil
-}
-
-// RemoveObjectError - container of Multi Delete S3 API error
-type RemoveObjectError struct {
-	ObjectName string
-	Err        error
-}
-
-// generateRemoveMultiObjectsRequest - generate the XML request for the remove multi objects request
-func generateRemoveMultiObjectsRequest(objects []string) []byte {
-	rmObjects := []deleteObject{}
-	for _, obj := range objects {
-		rmObjects = append(rmObjects, deleteObject{Key: obj})
-	}
-	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
-	return xmlBytes
-}
-
-// processRemoveMultiObjectsResponse - parse the remove multi objects web service response
-// and return the success/failure result status for each object
-func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
-	// Parse multi delete XML response
-	rmResult := &deleteMultiObjectsResult{}
-	err := xmlDecoder(body, rmResult)
-	if err != nil {
-		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
-		return
-	}
-
-	// Fill deletions that returned an error.
-	for _, obj := range rmResult.UnDeletedObjects {
-		errorCh <- RemoveObjectError{
-			ObjectName: obj.Key,
-			Err: ErrorResponse{
-				Code:    obj.Code,
-				Message: obj.Message,
-			},
-		}
-	}
-}
-
-// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
-func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
-	errorCh := make(chan RemoveObjectError, 1)
-
-	// Validate if bucket name is valid.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(errorCh)
-		errorCh <- RemoveObjectError{
-			Err: err,
-		}
-		return errorCh
-	}
-	// Validate objects channel to be properly allocated.
-	if objectsCh == nil {
-		defer close(errorCh)
-		errorCh <- RemoveObjectError{
-			Err: ErrInvalidArgument("Objects channel cannot be nil"),
-		}
-		return errorCh
-	}
-
-	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
-	go func(errorCh chan<- RemoveObjectError) {
-		maxEntries := 1000
-		finish := false
-		urlValues := make(url.Values)
-		urlValues.Set("delete", "")
-
-		// Close error channel when Multi delete finishes.
-		defer close(errorCh)
-
-		// Loop over entries by 1000 and call MultiDelete requests
-		for {
-			if finish {
-				break
-			}
-			count := 0
-			var batch []string
-
-			// Try to gather 1000 entries
-			for object := range objectsCh {
-				batch = append(batch, object)
-				if count++; count >= maxEntries {
-					break
-				}
-			}
-			if count == 0 {
-				// Multi Objects Delete API doesn't accept empty object list, quit immediately
-				break
-			}
-			if count < maxEntries {
-				// We didn't have 1000 entries, so this is the last batch
-				finish = true
-			}
-
-			// Generate remove multi objects XML request
-			removeBytes := generateRemoveMultiObjectsRequest(batch)
-			// Execute POST on bucket to delete objects.
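// For reference, a batch of two keys marshals to roughly the following XML
// body (Quiet mode suppresses per-key success entries in the response):
//
//	<Delete>
//	  <Quiet>true</Quiet>
//	  <Object><Key>a.txt</Key></Object>
//	  <Object><Key>b.txt</Key></Object>
//	</Delete>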
- resp, err := c.executeMethod(ctx, "POST", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Base64: sumMD5Base64(removeBytes), - contentSHA256Hex: sum256Hex(removeBytes), - }) - if resp != nil { - if resp.StatusCode != http.StatusOK { - e := httpRespToErrorResponse(resp, bucketName, "") - errorCh <- RemoveObjectError{ObjectName: "", Err: e} - } - } - if err != nil { - for _, b := range batch { - errorCh <- RemoveObjectError{ObjectName: b, Err: err} - } - continue - } - - // Process multiobjects remove xml response - processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) - - closeResponse(resp) - } - }(errorCh) - return errorCh -} - -// RemoveObjects removes multiple objects from a bucket. -// The list of objects to remove are received from objectsCh. -// Remove failures are sent back via error channel. -func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { - return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) -} - -// RemoveIncompleteUpload aborts an partially uploaded object. -func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - // Find multipart upload ids of the object to be aborted. - uploadIDs, err := c.findUploadIDs(bucketName, objectName) - if err != nil { - return err - } - - for _, uploadID := range uploadIDs { - // abort incomplete multipart upload, based on the upload id passed. - err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) - if err != nil { - return err - } - } - - return nil -} - -// abortMultipartUpload aborts a multipart upload for the given -// uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - - // Execute DELETE on multipart upload. - resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - // Abort has no response body, handle it for any errors. - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - // This is needed specifically for abort and it cannot - // be converged into default case. 
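// A minimal usage sketch (hypothetical names) for the channel-based
// RemoveObjects API above: feed keys in, drain failures out.
//
//	objectsCh := make(chan string)
//	go func() {
//		defer close(objectsCh)
//		objectsCh <- "old/backup-1.tar"
//		objectsCh <- "old/backup-2.tar"
//	}()
//	for rErr := range clnt.RemoveObjects("my-bucket", objectsCh) {
//		log.Println("remove failed:", rErr.ObjectName, rErr.Err)
//	}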
- errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - default: - return httpRespToErrorResponse(resp, bucketName, objectName) - } - return errorResponse - } - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go deleted file mode 100644 index a6b125522..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go +++ /dev/null @@ -1,245 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "time" -) - -// listAllMyBucketsResult container for listBuckets response. -type listAllMyBucketsResult struct { - // Container for one or more buckets. - Buckets struct { - Bucket []BucketInfo - } - Owner owner -} - -// owner container for bucket owner information. -type owner struct { - DisplayName string - ID string -} - -// CommonPrefix container for prefix response. -type CommonPrefix struct { - Prefix string -} - -// ListBucketV2Result container for listObjects response version 2. -type ListBucketV2Result struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - MaxKeys int64 - Name string - - // Hold the token that will be sent in the next request to fetch the next group of keys - NextContinuationToken string - - ContinuationToken string - Prefix string - - // FetchOwner and StartAfter are currently not used - FetchOwner string - StartAfter string -} - -// ListBucketResult container for listObjects response. -type ListBucketResult struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - Marker string - MaxKeys int64 - Name string - - // When response is truncated (the IsTruncated element value in - // the response is true), you can use the key name in this field - // as marker in the subsequent request to get next set of objects. 
-	// Object storage lists objects in alphabetical order. Note: This
-	// element is returned only if you have delimiter request
-	// parameter specified. If response does not include the NextMarker
-	// and it is truncated, you can use the value of the last Key in
-	// the response as the marker in the subsequent request to get the
-	// next set of object keys.
-	NextMarker string
-	Prefix     string
-}
-
-// ListMultipartUploadsResult container for ListMultipartUploads response
-type ListMultipartUploadsResult struct {
-	Bucket             string
-	KeyMarker          string
-	UploadIDMarker     string `xml:"UploadIdMarker"`
-	NextKeyMarker      string
-	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
-	EncodingType       string
-	MaxUploads         int64
-	IsTruncated        bool
-	Uploads            []ObjectMultipartInfo `xml:"Upload"`
-	Prefix             string
-	Delimiter          string
-	// A response can contain CommonPrefixes only if you specify a delimiter.
-	CommonPrefixes []CommonPrefix
-}
-
-// initiator container for who initiated multipart upload.
-type initiator struct {
-	ID          string
-	DisplayName string
-}
-
-// copyObjectResult container for copy object response.
-type copyObjectResult struct {
-	ETag         string
-	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
-}
-
-// ObjectPart container for particular part of an object.
-type ObjectPart struct {
-	// Part number identifies the part.
-	PartNumber int
-
-	// Date and time the part was uploaded.
-	LastModified time.Time
-
-	// Entity tag returned when the part was uploaded, usually md5sum
-	// of the part.
-	ETag string
-
-	// Size of the uploaded part data.
-	Size int64
-}
-
-// ListObjectPartsResult container for ListObjectParts response.
-type ListObjectPartsResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-
-	Initiator initiator
-	Owner     owner
-
-	StorageClass         string
-	PartNumberMarker     int
-	NextPartNumberMarker int
-	MaxParts             int
-
-	// Indicates whether the returned list of parts is truncated.
-	IsTruncated bool
-	ObjectParts []ObjectPart `xml:"Part"`
-
-	EncodingType string
-}
-
-// initiateMultipartUploadResult container for InitiateMultiPartUpload
-// response.
-type initiateMultipartUploadResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-}
-
-// completeMultipartUploadResult container for completed multipart
-// upload response.
-type completeMultipartUploadResult struct {
-	Location string
-	Bucket   string
-	Key      string
-	ETag     string
-}
-
-// CompletePart sub container lists individual part numbers and their
-// md5sum, part of completeMultipartUpload.
-type CompletePart struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
-	// Part number identifies the part.
-	PartNumber int
-	ETag       string
-}
-
-// completeMultipartUpload container for completing multipart upload.
-type completeMultipartUpload struct {
-	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
-	Parts   []CompletePart `xml:"Part"`
-}
-
-// createBucketConfiguration container for bucket configuration.
-type createBucketConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} - -// deleteObject container for Delete element in MultiObjects Delete XML request -type deleteObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` -} - -// deletedObject container for Deleted element in MultiObjects Delete XML response -type deletedObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` - // These fields are ignored. - DeleteMarker bool - DeleteMarkerVersionID string -} - -// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response -type nonDeletedObject struct { - Key string - Code string - Message string -} - -// deletedMultiObjects container for MultiObjects Delete XML request -type deleteMultiObjects struct { - XMLName xml.Name `xml:"Delete"` - Quiet bool - Objects []deleteObject `xml:"Object"` -} - -// deletedMultiObjectsResult container for MultiObjects Delete XML response -type deleteMultiObjectsResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []deletedObject `xml:"Deleted"` - UnDeletedObjects []nonDeletedObject `xml:"Error"` -} diff --git a/vendor/github.com/minio/minio-go/v6/api-select.go b/vendor/github.com/minio/minio-go/v6/api-select.go deleted file mode 100644 index 7ecd95380..000000000 --- a/vendor/github.com/minio/minio-go/v6/api-select.go +++ /dev/null @@ -1,538 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/xml" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/v6/pkg/encrypt" - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// CSVFileHeaderInfo - is the parameter for whether to utilize headers. -type CSVFileHeaderInfo string - -// Constants for file header info. -const ( - CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore = "IGNORE" - CSVFileHeaderInfoUse = "USE" -) - -// SelectCompressionType - is the parameter for what type of compression is -// present -type SelectCompressionType string - -// Constants for compression types under select API. -const ( - SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP = "GZIP" - SelectCompressionBZIP = "BZIP2" -) - -// CSVQuoteFields - is the parameter for how CSV fields are quoted. -type CSVQuoteFields string - -// Constants for csv quote styles. -const ( - CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded = "AsNeeded" -) - -// QueryExpressionType - is of what syntax the expression is, this should only -// be SQL -type QueryExpressionType string - -// Constants for expression type. 
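// For reference, the multi-delete containers above decode a response such
// as (whitespace added for readability):
//
//	<DeleteResult>
//	  <Deleted><Key>a.txt</Key></Deleted>
//	  <Error><Key>b.txt</Key><Code>AccessDenied</Code><Message>denied</Message></Error>
//	</DeleteResult>
//
// into one deletedObject and one nonDeletedObject; the latter is surfaced
// by processRemoveMultiObjectsResponse as a RemoveObjectError.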
-const ( - QueryExpressionTypeSQL QueryExpressionType = "SQL" -) - -// JSONType determines json input serialization type. -type JSONType string - -// Constants for JSONTypes. -const ( - JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType = "LINES" -) - -// ParquetInputOptions parquet input specific options -type ParquetInputOptions struct{} - -// CSVInputOptions csv input specific options -type CSVInputOptions struct { - FileHeaderInfo CSVFileHeaderInfo - RecordDelimiter string - FieldDelimiter string `xml:",omitempty"` - QuoteCharacter string `xml:",omitempty"` - QuoteEscapeCharacter string `xml:",omitempty"` - Comments string `xml:",omitempty"` -} - -// CSVOutputOptions csv output specific options -type CSVOutputOptions struct { - QuoteFields CSVQuoteFields `xml:",omitempty"` - RecordDelimiter string - FieldDelimiter string `xml:",omitempty"` - QuoteCharacter string `xml:",omitempty"` - QuoteEscapeCharacter string `xml:",omitempty"` -} - -// JSONInputOptions json input specific options -type JSONInputOptions struct { - Type JSONType -} - -// JSONOutputOptions - json output specific options -type JSONOutputOptions struct { - RecordDelimiter string -} - -// SelectObjectInputSerialization - input serialization parameters -type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOutputSerialization - output serialization parameters. -type SelectObjectOutputSerialization struct { - CSV *CSVOutputOptions `xml:"CSV,omitempty"` - JSON *JSONOutputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOptions - represents the input select body -type SelectObjectOptions struct { - XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` - ServerSideEncryption encrypt.ServerSide `xml:"-"` - Expression string - ExpressionType QueryExpressionType - InputSerialization SelectObjectInputSerialization - OutputSerialization SelectObjectOutputSerialization - RequestProgress struct { - Enabled bool - } -} - -// Header returns the http.Header representation of the SelectObject options. -func (o SelectObjectOptions) Header() http.Header { - headers := make(http.Header) - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// SelectObjectType - is the parameter which defines what type of object the -// operation is being performed on. -type SelectObjectType string - -// Constants for input data types. -const ( - SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON = "JSON" - SelectObjectTypeParquet = "Parquet" -) - -// preludeInfo is used for keeping track of necessary information from the -// prelude. -type preludeInfo struct { - totalLen uint32 - headerLen uint32 -} - -// SelectResults is used for the streaming responses from the server. -type SelectResults struct { - pipeReader *io.PipeReader - resp *http.Response - stats *StatsMessage - progress *ProgressMessage -} - -// ProgressMessage is a struct for progress xml message. -type ProgressMessage struct { - XMLName xml.Name `xml:"Progress" json:"-"` - StatsMessage -} - -// StatsMessage is a struct for stat xml message. -type StatsMessage struct { - XMLName xml.Name `xml:"Stats" json:"-"` - BytesScanned int64 - BytesProcessed int64 - BytesReturned int64 -} - -// messageType represents the type of message. 
-type messageType string - -const ( - errorMsg messageType = "error" - commonMsg = "event" -) - -// eventType represents the type of event. -type eventType string - -// list of event-types returned by Select API. -const ( - endEvent eventType = "End" - recordsEvent = "Records" - progressEvent = "Progress" - statsEvent = "Stats" -) - -// contentType represents content type of event. -type contentType string - -const ( - xmlContent contentType = "text/xml" -) - -// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. -func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - selectReqBytes, err := xml.Marshal(opts) - if err != nil { - return nil, err - } - - urlValues := make(url.Values) - urlValues.Set("select", "") - urlValues.Set("select-type", "2") - - // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, "POST", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: opts.Header(), - contentMD5Base64: sumMD5Base64(selectReqBytes), - contentSHA256Hex: sum256Hex(selectReqBytes), - contentBody: bytes.NewReader(selectReqBytes), - contentLength: int64(len(selectReqBytes)), - }) - if err != nil { - return nil, err - } - - return NewSelectResults(resp, bucketName) -} - -// NewSelectResults creates a Select Result parser that parses the response -// and returns a Reader that will return parsed and assembled select output. -func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - pipeReader, pipeWriter := io.Pipe() - streamer := &SelectResults{ - resp: resp, - stats: &StatsMessage{}, - progress: &ProgressMessage{}, - pipeReader: pipeReader, - } - streamer.start(pipeWriter) - return streamer, nil -} - -// Close - closes the underlying response body and the stream reader. -func (s *SelectResults) Close() error { - defer closeResponse(s.resp) - return s.pipeReader.Close() -} - -// Read - is a reader compatible implementation for SelectObjectContent records. -func (s *SelectResults) Read(b []byte) (n int, err error) { - return s.pipeReader.Read(b) -} - -// Stats - information about a request's stats when processing is complete. -func (s *SelectResults) Stats() *StatsMessage { - return s.stats -} - -// Progress - information about the progress of a request. -func (s *SelectResults) Progress() *ProgressMessage { - return s.progress -} - -// start is the main function that decodes the large byte array into -// several events that are sent through the eventstream. -func (s *SelectResults) start(pipeWriter *io.PipeWriter) { - go func() { - for { - var prelude preludeInfo - var headers = make(http.Header) - var err error - - // Create CRC code - crc := crc32.New(crc32.IEEETable) - crcReader := io.TeeReader(s.resp.Body, crc) - - // Extract the prelude(12 bytes) into a struct to extract relevant information. 
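// A minimal usage sketch (hypothetical names) for SelectObjectContent
// above; SelectResults implements io.Reader, so records stream directly:
//
//	opts := minio.SelectObjectOptions{
//		Expression:     "SELECT * FROM S3Object",
//		ExpressionType: minio.QueryExpressionTypeSQL,
//		InputSerialization: minio.SelectObjectInputSerialization{
//			CompressionType: minio.SelectCompressionNONE,
//			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
//		},
//		OutputSerialization: minio.SelectObjectOutputSerialization{
//			CSV: &minio.CSVOutputOptions{RecordDelimiter: "\n"},
//		},
//	}
//	res, err := clnt.SelectObjectContent(context.Background(), "my-bucket", "data.csv", opts)
//	if err == nil {
//		defer res.Close()
//		io.Copy(os.Stdout, res)
//	}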
- prelude, err = processPrelude(crcReader, crc) - if err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - // Extract the headers(variable bytes) into a struct to extract relevant information - if prelude.headerLen > 0 { - if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - } - - // Get the actual payload length so that the appropriate amount of - // bytes can be read or parsed. - payloadLen := prelude.PayloadLen() - - m := messageType(headers.Get("message-type")) - - switch m { - case errorMsg: - pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) - closeResponse(s.resp) - return - case commonMsg: - // Get content-type of the payload. - c := contentType(headers.Get("content-type")) - - // Get event type of the payload. - e := eventType(headers.Get("event-type")) - - // Handle all supported events. - switch e { - case endEvent: - pipeWriter.Close() - closeResponse(s.resp) - return - case recordsEvent: - if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - case progressEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) - closeResponse(s.resp) - return - } - case statsEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) - closeResponse(s.resp) - return - } - } - } - - // Ensures that the full message's CRC is correct and - // that the message is not corrupted - if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - } - }() -} - -// PayloadLen is a function that calculates the length of the payload. -func (p preludeInfo) PayloadLen() int64 { - return int64(p.totalLen - p.headerLen - 16) -} - -// processPrelude is the function that reads the 12 bytes of the prelude and -// ensures the CRC is correct while also extracting relevant information into -// the struct, -func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { - var err error - var pInfo = preludeInfo{} - - // reads total length of the message (first 4 bytes) - pInfo.totalLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // reads total header length of the message (2nd 4 bytes) - pInfo.headerLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // checks that the CRC is correct (3rd 4 bytes) - preCRC := crc.Sum32() - if err := checkCRC(prelude, preCRC); err != nil { - return pInfo, err - } - - return pInfo, nil -} - -// extracts the relevant information from the Headers. 
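// For reference, the event-stream framing decoded above is laid out as:
//
//	[4B total length][4B header length][4B prelude CRC]   <- 12-byte prelude
//	[headers ...][payload ...][4B message CRC]
//
// which is why PayloadLen() computes totalLen - headerLen - 16: the 12-byte
// prelude plus the trailing 4-byte message CRC.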
-func extractHeader(body io.Reader, myHeaders http.Header) error {
-	for {
-		// extracts the first part of the header, the header type name.
-		headerTypeName, err := extractHeaderType(body)
-		if err != nil {
-			// Since end of file, we have read all of our headers
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-
-		// reads the header value type byte (always 7, the string type) and ignores it.
-		extractUint8(body)
-
-		headerValueName, err := extractHeaderValue(body)
-		if err != nil {
-			return err
-		}
-
-		myHeaders.Set(headerTypeName, headerValueName)
-
-	}
-	return nil
-}
-
-// extractHeaderType extracts the first half of the header message, the header type.
-func extractHeaderType(body io.Reader) (string, error) {
-	// extracts a 1-byte integer, the length of the header name
-	headerNameLen, err := extractUint8(body)
-	if err != nil {
-		return "", err
-	}
-	// extracts the string with the appropriate number of bytes
-	headerName, err := extractString(body, int(headerNameLen))
-	if err != nil {
-		return "", err
-	}
-	return strings.TrimPrefix(headerName, ":"), nil
-}
-
-// extractHeaderValue extracts the second half of the header message, the
-// header value
-func extractHeaderValue(body io.Reader) (string, error) {
-	bodyLen, err := extractUint16(body)
-	if err != nil {
-		return "", err
-	}
-	bodyName, err := extractString(body, int(bodyLen))
-	if err != nil {
-		return "", err
-	}
-	return bodyName, nil
-}
-
-// extracts a string of a particular number of bytes from the reader.
-func extractString(source io.Reader, lenBytes int) (string, error) {
-	myVal := make([]byte, lenBytes)
-	_, err := io.ReadFull(source, myVal)
-	if err != nil {
-		return "", err
-	}
-	return string(myVal), nil
-}
-
-// extractUint32 extracts a 4 byte integer from the byte array.
-func extractUint32(r io.Reader) (uint32, error) {
-	buf := make([]byte, 4)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return binary.BigEndian.Uint32(buf), nil
-}
-
-// extractUint16 extracts a 2 byte integer from the byte array.
-func extractUint16(r io.Reader) (uint16, error) {
-	buf := make([]byte, 2)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return binary.BigEndian.Uint16(buf), nil
-}
-
-// extractUint8 extracts a 1 byte integer from the byte array.
-func extractUint8(r io.Reader) (uint8, error) {
-	buf := make([]byte, 1)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return buf[0], nil
-}
-
-// checkCRC ensures that the CRC matches with the one from the reader.
-func checkCRC(r io.Reader, expect uint32) error {
-	msgCRC, err := extractUint32(r)
-	if err != nil {
-		return err
-	}
-
-	if msgCRC != expect {
-		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
-
-	}
-	return nil
-}
diff --git a/vendor/github.com/minio/minio-go/v6/api-stat.go b/vendor/github.com/minio/minio-go/v6/api-stat.go
deleted file mode 100644
index 5cc40e757..000000000
--- a/vendor/github.com/minio/minio-go/v6/api-stat.go
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// BucketExists verify if bucket exists and you have permission to access it. -func (c Client) BucketExists(bucketName string) (bool, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return false, err - } - - // Execute HEAD on bucketName. - resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - if ToErrorResponse(err).Code == "NoSuchBucket" { - return false, nil - } - return false, err - } - if resp != nil { - resperr := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(resperr).Code == "NoSuchBucket" { - return false, nil - } - if resp.StatusCode != http.StatusOK { - return false, httpRespToErrorResponse(resp, bucketName, "") - } - } - return true, nil -} - -// List of header keys to be filtered, usually -// from all S3 API http responses. -var defaultFilterKeys = []string{ - "Connection", - "Transfer-Encoding", - "Accept-Ranges", - "Date", - "Server", - "Vary", - "x-amz-bucket-region", - "x-amz-request-id", - "x-amz-id-2", - "Content-Security-Policy", - "X-Xss-Protection", - - // Add new headers to be ignored. -} - -// Extract only necessary metadata header key/values by -// filtering them out with a list of custom header keys. -func extractObjMetadata(header http.Header) http.Header { - filterKeys := append([]string{ - "ETag", - "Content-Length", - "Last-Modified", - "Content-Type", - "Expires", - }, defaultFilterKeys...) - return filterHeader(header, filterKeys) -} - -// StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - return c.statObject(context.Background(), bucketName, objectName, opts) -} - -// Lower level API for statObject supporting pre-conditions and range headers. -func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - - // Execute HEAD on objectName. - resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Hex: emptySHA256Hex, - customHeader: opts.Header(), - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Trim off the odd double quotes from ETag in the beginning and end. 
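// A minimal usage sketch (hypothetical names) for StatObject above:
//
//	info, err := clnt.StatObject("my-bucket", "report.csv", minio.StatObjectOptions{})
//	if err == nil {
//		fmt.Println(info.Size, info.ContentType, info.LastModified)
//	}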
-	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	md5sum = strings.TrimSuffix(md5sum, "\"")
-
-	// Parse content length if it exists
-	var size int64 = -1
-	contentLengthStr := resp.Header.Get("Content-Length")
-	if contentLengthStr != "" {
-		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
-		if err != nil {
-			// Content-Length is not valid
-			return ObjectInfo{}, ErrorResponse{
-				Code:       "InternalError",
-				Message:    "Content-Length is invalid. " + reportIssue,
-				BucketName: bucketName,
-				Key:        objectName,
-				RequestID:  resp.Header.Get("x-amz-request-id"),
-				HostID:     resp.Header.Get("x-amz-id-2"),
-				Region:     resp.Header.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	// Parse Last-Modified, which has http time format.
-	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
-	if err != nil {
-		return ObjectInfo{}, ErrorResponse{
-			Code:       "InternalError",
-			Message:    "Last-Modified time format is invalid. " + reportIssue,
-			BucketName: bucketName,
-			Key:        objectName,
-			RequestID:  resp.Header.Get("x-amz-request-id"),
-			HostID:     resp.Header.Get("x-amz-id-2"),
-			Region:     resp.Header.Get("x-amz-bucket-region"),
-		}
-	}
-
-	// Fetch content type if present.
-	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	expiryStr := resp.Header.Get("Expires")
-	var expTime time.Time
-	if t, err := time.Parse(http.TimeFormat, expiryStr); err == nil {
-		expTime = t.UTC()
-	}
-
-	metadata := extractObjMetadata(resp.Header)
-	userMetadata := map[string]string{}
-	const xamzmeta = "x-amz-meta-"
-	const xamzmetaLen = len(xamzmeta)
-	for k, v := range metadata {
-		if strings.HasPrefix(strings.ToLower(k), xamzmeta) {
-			userMetadata[k[xamzmetaLen:]] = v[0]
-		}
-	}
-
-	// Save object metadata info.
-	return ObjectInfo{
-		ETag:         md5sum,
-		Key:          objectName,
-		Size:         size,
-		LastModified: date,
-		ContentType:  contentType,
-		Expires:      expTime,
-		// Extract only the relevant header keys describing the object.
-		// The function above filters out a standard set of keys
-		// which are not part of object metadata.
-		Metadata:     metadata,
-		UserMetadata: userMetadata,
-	}, nil
-}
diff --git a/vendor/github.com/minio/minio-go/v6/api.go b/vendor/github.com/minio/minio-go/v6/api.go
deleted file mode 100644
index 7d64a3d97..000000000
--- a/vendor/github.com/minio/minio-go/v6/api.go
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"crypto/md5"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"net/http"
-	"net/http/cookiejar"
-	"net/http/httputil"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/minio/sha256-simd"
-
-	"golang.org/x/net/publicsuffix"
-
-	"github.com/minio/minio-go/v6/pkg/credentials"
-	"github.com/minio/minio-go/v6/pkg/s3signer"
-	"github.com/minio/minio-go/v6/pkg/s3utils"
-)
-
-// Client implements Amazon S3 compatible methods.
-type Client struct {
-	/// Standard options.
-
-	// Parsed endpoint url provided by the user.
-	endpointURL *url.URL
-
-	// Holds various credential providers.
-	credsProvider *credentials.Credentials
-
-	// Custom signerType value overrides all credentials.
-	overrideSignerType credentials.SignatureType
-
-	// User supplied.
-	appInfo struct {
-		appName    string
-		appVersion string
-	}
-
-	// Indicate whether we are using https or not
-	secure bool
-
-	// Needs allocation.
-	httpClient     *http.Client
-	bucketLocCache *bucketLocationCache
-
-	// Advanced functionality.
-	isTraceEnabled  bool
-	traceErrorsOnly bool
-	traceOutput     io.Writer
-
-	// S3 specific accelerated endpoint.
-	s3AccelerateEndpoint string
-
-	// Region endpoint
-	region string
-
-	// Random seed.
-	random *rand.Rand
-
-	// lookup indicates type of url lookup supported by server. If not specified,
-	// default to Auto.
-	lookup BucketLookupType
-}
-
-// Options for New method
-type Options struct {
-	Creds        *credentials.Credentials
-	Secure       bool
-	Region       string
-	BucketLookup BucketLookupType
-	// Add future fields here
-}
-
-// Global constants.
-const (
-	libraryName    = "minio-go"
-	libraryVersion = "v6.0.43"
-)
-
-// User Agent should always follow the below style.
-// Please open an issue to discuss any new changes here.
-//
-//	MinIO (OS; ARCH) LIB/VER APP/VER
-const (
-	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
-	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
-)
-
-// BucketLookupType is type of url lookup supported by server.
-type BucketLookupType int
-
-// Different types of url lookup supported by the server. Initialized to BucketLookupAuto
-const (
-	BucketLookupAuto BucketLookupType = iota
-	BucketLookupDNS
-	BucketLookupPath
-)
-
-// NewV2 - instantiate minio client with Amazon S3 signature version
-// '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
-	if err != nil {
-		return nil, err
-	}
-	clnt.overrideSignerType = credentials.SignatureV2
-	return clnt, nil
-}
-
-// NewV4 - instantiate minio client with Amazon S3 signature version
-// '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
-	if err != nil {
-		return nil, err
-	}
-	clnt.overrideSignerType = credentials.SignatureV4
-	return clnt, nil
-}
-
-// New - instantiate minio client, adds automatic verification of signature.
-func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
-	if err != nil {
-		return nil, err
-	}
-	// Google cloud storage should be set to signature V2, force it if not.
-	if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
-		clnt.overrideSignerType = credentials.SignatureV2
-	}
-	// If Amazon S3, set to signature v4.
-	if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
-		clnt.overrideSignerType = credentials.SignatureV4
-	}
-	return clnt, nil
-}
-
-// NewWithCredentials - instantiate minio client with a credentials provider
-// for retrieving credentials from various credentials providers such as
-// IAM, File, Env etc.
-func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
-	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
-}
-
-// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
-// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
-// Use this function when your application deals with a single region.
-func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
-}
-
-// NewWithOptions - instantiate minio client with options
-func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
-	return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
-}
-
-// EndpointURL returns the URL of the S3 endpoint.
-func (c *Client) EndpointURL() *url.URL {
-	endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
-	return &endpoint
-}
-
-// lockedRandSource provides protected rand source, implements rand.Source interface.
-type lockedRandSource struct {
-	lk  sync.Mutex
-	src rand.Source
-}
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func (r *lockedRandSource) Int63() (n int64) {
-	r.lk.Lock()
-	n = r.src.Int63()
-	r.lk.Unlock()
-	return
-}
-
-// Seed uses the provided seed value to initialize the generator to a
-// deterministic state.
-func (r *lockedRandSource) Seed(seed int64) {
-	r.lk.Lock()
-	r.src.Seed(seed)
-	r.lk.Unlock()
-}
-
-// Redirect requests by re-signing the request.
-func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
-	if len(via) >= 5 {
-		return errors.New("stopped after 5 redirects")
-	}
-	if len(via) == 0 {
-		return nil
-	}
-	lastRequest := via[len(via)-1]
-	var reAuth bool
-	for attr, val := range lastRequest.Header {
-		// if hosts do not match do not copy Authorization header
-		if attr == "Authorization" && req.Host != lastRequest.Host {
-			reAuth = true
-			continue
-		}
-		if _, ok := req.Header[attr]; !ok {
-			req.Header[attr] = val
-		}
-	}
-
-	*c.endpointURL = *req.URL
-
-	value, err := c.credsProvider.Get()
-	if err != nil {
-		return err
-	}
-	var (
-		signerType      = value.SignerType
-		accessKeyID     = value.AccessKeyID
-		secretAccessKey = value.SecretAccessKey
-		sessionToken    = value.SessionToken
-		region          = c.region
-	)
-
-	// Custom signer set then override the behavior.
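// A minimal construction sketch (hypothetical endpoint and credentials)
// using the constructors above:
//
//	clnt, err := minio.New("play.min.io", "ACCESS-KEY", "SECRET-KEY", true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// With a known fixed region, bucket-location lookups can be skipped:
//	clnt, err = minio.NewWithRegion("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true, "us-east-1")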
- if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if reAuth { - // Check if there is no region override, if not get it from the URL if possible. - if region == "" { - region = s3utils.GetRegionFromURL(*c.endpointURL) - } - switch { - case signerType.IsV2(): - return errors.New("signature V2 cannot support redirection") - case signerType.IsV4(): - s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) - } - } - return nil -} - -func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { - // construct endpoint. - endpointURL, err := getEndpointURL(endpoint, secure) - if err != nil { - return nil, err - } - - // Initialize cookies to preserve server sent cookies if any and replay - // them upon each request. - jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) - if err != nil { - return nil, err - } - - // instantiate new Client. - clnt := new(Client) - - // Save the credentials. - clnt.credsProvider = creds - - // Remember whether we are using https or not - clnt.secure = secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL - - transport, err := DefaultTransport(secure) - if err != nil { - return nil, err - } - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Jar: jar, - Transport: transport, - CheckRedirect: clnt.redirectHeaders, - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - if region == "" { - region = s3utils.GetRegionFromURL(*clnt.endpointURL) - } - clnt.region = region - - // Instantiate bucket location cache. - clnt.bucketLocCache = newBucketLocationCache() - - // Introduce a new locked random seed. - clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) - - // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined - // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. - clnt.lookup = lookup - // Return. - return clnt, nil -} - -// SetAppInfo - add application details to user agent. -func (c *Client) SetAppInfo(appName string, appVersion string) { - // if app name and version not set, we do not set a new user agent. - if appName != "" && appVersion != "" { - c.appInfo.appName = appName - c.appInfo.appVersion = appVersion - } -} - -// SetCustomTransport - set new custom transport. -func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { - // Set this to override default transport - // ``http.DefaultTransport``. - // - // This transport is usually needed for debugging OR to add your - // own custom TLS certificates on the client transport, for custom - // CA's and certs which are not part of standard certificate - // authority follow this example :- - // - // tr := &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - // } - // api.SetCustomTransport(tr) - // - if c.httpClient != nil { - c.httpClient.Transport = customHTTPTransport - } -} - -// TraceOn - enable HTTP tracing. 
-func (c *Client) TraceOn(outputStream io.Writer) { - // if outputStream is nil then default to os.Stdout. - if outputStream == nil { - outputStream = os.Stdout - } - // Sets a new output stream. - c.traceOutput = outputStream - - // Enable tracing. - c.isTraceEnabled = true -} - -// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. -func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { - c.TraceOn(outputStream) - c.traceErrorsOnly = true -} - -// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. -// If all tracing needs to be turned off, call TraceOff(). -func (c *Client) TraceErrorsOnlyOff() { - c.traceErrorsOnly = false -} - -// TraceOff - disable HTTP tracing. -func (c *Client) TraceOff() { - // Disable tracing. - c.isTraceEnabled = false - c.traceErrorsOnly = false -} - -// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your -// requests. This feature is only specific to S3 for all other endpoints this -// function does nothing. To read further details on s3 transfer acceleration -// please vist - -// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - c.s3AccelerateEndpoint = accelerateEndpoint - } -} - -// Hash materials provides relevant initialized hash algo writers -// based on the expected signature type. -// -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. -func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) { - hashSums = make(map[string][]byte) - hashAlgos = make(map[string]hash.Hash) - if c.overrideSignerType.IsV4() { - if c.secure { - hashAlgos["md5"] = md5.New() - } else { - hashAlgos["sha256"] = sha256.New() - } - } else { - if c.overrideSignerType.IsAnonymous() { - hashAlgos["md5"] = md5.New() - } - } - return hashAlgos, hashSums -} - -// requestMetadata - is container for all the values to make a request. -type requestMetadata struct { - // If set newRequest presigns the URL. - presignURL bool - - // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - expires int64 - - // Generated by our internal code. - bucketLocation string - contentBody io.Reader - contentLength int64 - contentMD5Base64 string // carries base64 encoded md5sum - contentSHA256Hex string // carries hex encoded sha256sum -} - -// dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { - // Starts http dump. - _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") - if err != nil { - return err - } - - // Filter out Signature field from Authorization header. - origAuth := req.Header.Get("Authorization") - if origAuth != "" { - req.Header.Set("Authorization", redactSignature(origAuth)) - } - - // Only display request header. - reqTrace, err := httputil.DumpRequestOut(req, false) - if err != nil { - return err - } - - // Write request to trace output. - _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) - if err != nil { - return err - } - - // Only display response header. - var respTrace []byte - - // For errors we make sure to dump response body as well. 
-	if resp.StatusCode != http.StatusOK &&
-		resp.StatusCode != http.StatusPartialContent &&
-		resp.StatusCode != http.StatusNoContent {
-		respTrace, err = httputil.DumpResponse(resp, true)
-		if err != nil {
-			return err
-		}
-	} else {
-		respTrace, err = httputil.DumpResponse(resp, false)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Write response to trace output.
-	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
-	if err != nil {
-		return err
-	}
-
-	// Ends the http dump.
-	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
-	if err != nil {
-		return err
-	}
-
-	// Returns success.
-	return nil
-}
-
-// do - execute http request.
-func (c Client) do(req *http.Request) (*http.Response, error) {
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		// Handle this specifically for now until future Golang versions fix this issue properly.
-		if urlErr, ok := err.(*url.Error); ok {
-			if strings.Contains(urlErr.Err.Error(), "EOF") {
-				return nil, &url.Error{
-					Op:  urlErr.Op,
-					URL: urlErr.URL,
-					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
-				}
-			}
-		}
-		return nil, err
-	}
-
-	// Response should never be nil; report an error if that is the case.
-	if resp == nil {
-		msg := "Response is empty. " + reportIssue
-		return nil, ErrInvalidArgument(msg)
-	}
-
-	// If trace is enabled, dump http request and response,
-	// except when the traceErrorsOnly enabled and the response's status code is ok
-	if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
-		err = c.dumpHTTP(req, resp)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return resp, nil
-}
-
-// List of success status.
-var successStatus = []int{
-	http.StatusOK,
-	http.StatusNoContent,
-	http.StatusPartialContent,
-}
-
-// executeMethod - instantiates a given method, and retries the
-// request upon any error up to maxRetries attempts in a binomially
-// delayed manner using a standard back off algorithm.
-func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
-	var isRetryable bool     // Indicates if request can be retried.
-	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
-	var reqRetry = MaxRetry  // Indicates how many times we can retry the request
-
-	if metadata.contentBody != nil {
-		// Check if body is seekable then it is retryable.
-		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
-		switch bodySeeker {
-		case os.Stdin, os.Stdout, os.Stderr:
-			isRetryable = false
-		}
-		// Retry only when reader is seekable
-		if !isRetryable {
-			reqRetry = 1
-		}
-
-		// Figure out if the body can be closed - if yes
-		// we will definitely close it upon the function
-		// return.
-		bodyCloser, ok := metadata.contentBody.(io.Closer)
-		if ok {
-			defer bodyCloser.Close()
-		}
-	}
-
-	// Create a done channel to control 'newRetryTimer' go routine.
-	doneCh := make(chan struct{}, 1)
-
-	// Indicate to our routine to exit cleanly upon return.
-	defer close(doneCh)
-
-	// Blank identifier is kept here on purpose since 'range' without
-	// blank identifiers is only supported since go1.4
-	// https://golang.org/doc/go1.4#forrange.
-	for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
-		// Retry executes the following function body if request has an
-		// error until maxRetries have been exhausted, retry attempts are
-		// performed after waiting for a given period of time in a
-		// binomial fashion.
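// For reference: retries below happen only when the request body is an
// io.Seeker, because each attempt must rewind the body to its start, e.g.
//
//	if seeker, ok := body.(io.Seeker); ok {
//		seeker.Seek(0, io.SeekStart) // rewind before re-sending
//	}
//
// os.Stdin/Stdout/Stderr are excluded above since seeking them is not
// meaningful, so such requests are attempted once.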
-		if isRetryable {
-			// Seek back to the beginning for each attempt.
-			if _, err = bodySeeker.Seek(0, 0); err != nil {
-				// If the seek failed, there is no point in retrying.
-				return nil, err
-			}
-		}
-
-		// Instantiate a new request.
-		var req *http.Request
-		req, err = c.newRequest(method, metadata)
-		if err != nil {
-			errResponse := ToErrorResponse(err)
-			if isS3CodeRetryable(errResponse.Code) {
-				continue // Retry.
-			}
-			return nil, err
-		}
-
-		// Add the context to the request.
-		req = req.WithContext(ctx)
-
-		// Initiate the request.
-		res, err = c.do(req)
-		if err != nil {
-			// For supported HTTP request errors, verify whether they are retryable.
-			if isHTTPReqErrorRetryable(err) {
-				continue // Retry.
-			}
-			// For other errors there is no need to retry; return here.
-			return nil, err
-		}
-
-		// For any known successful http status, return quickly.
-		for _, httpStatus := range successStatus {
-			if httpStatus == res.StatusCode {
-				return res, nil
-			}
-		}
-
-		// Read the body to be saved later.
-		errBodyBytes, err := ioutil.ReadAll(res.Body)
-		// res.Body should be closed.
-		closeResponse(res)
-		if err != nil {
-			return nil, err
-		}
-
-		// Save the body.
-		errBodySeeker := bytes.NewReader(errBodyBytes)
-		res.Body = ioutil.NopCloser(errBodySeeker)
-
-		// For errors, verify if it's retryable, otherwise fail quickly.
-		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
-
-		// Save the body back again.
-		errBodySeeker.Seek(0, 0) // Seek back to starting point.
-		res.Body = ioutil.NopCloser(errBodySeeker)
-
-		// If the bucket region is set in the error response and the error
-		// code dictates an invalid region, we can retry the request
-		// with the new region.
-		//
-		// Additionally we should only retry if bucketLocation and custom
-		// region are empty.
-		if c.region == "" {
-			switch errResponse.Code {
-			case "AuthorizationHeaderMalformed":
-				fallthrough
-			case "InvalidRegion":
-				fallthrough
-			case "AccessDenied":
-				if metadata.bucketName != "" && errResponse.Region != "" {
-					// Gather the cached location only if bucketName is present.
-					if _, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk {
-						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
-						continue // Retry.
-					}
-				} else {
-					// Most probably for ListBuckets().
-					if errResponse.Region != metadata.bucketLocation {
-						// Retry if the error response has a different
-						// region than the request we just made.
-						metadata.bucketLocation = errResponse.Region
-						continue // Retry.
-					}
-				}
-			}
-		}
-
-		// Verify if the error response code is retryable.
-		if isS3CodeRetryable(errResponse.Code) {
-			continue // Retry.
-		}
-
-		// Verify if the http status code is retryable.
-		if isHTTPStatusRetryable(res.StatusCode) {
-			continue // Retry.
-		}
-
-		// For all other cases break out of the retry loop.
-		break
-	}
-	return res, err
-}
-
-// newRequest - instantiate a new HTTP request for a given method.
-func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
-	// If no method is supplied default to 'POST'.
-	if method == "" {
-		method = "POST"
-	}
-
-	location := metadata.bucketLocation
-	if location == "" {
-		if metadata.bucketName != "" {
-			// Gather the location only if bucketName is present.
-			location, err = c.getBucketLocation(metadata.bucketName)
-			if err != nil {
-				return nil, err
-			}
-		}
-		if location == "" {
-			location = getDefaultLocation(*c.endpointURL, c.region)
-		}
-	}
-
-	// Check if the target URL supports virtual-host-style requests.
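executeMethod above drives its retries off a channel produced by newRetryTimer, whose implementation is not part of this hunk (the real timer also honors the done channel for early exit). A self-contained sketch in the same spirit, with illustrative names and constants that are not the library's:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // retryTimer yields attempt numbers, sleeping between attempts with
    // capped exponential backoff plus proportional random jitter.
    func retryTimer(maxRetry int, unit, maxWait time.Duration, jitter float64) <-chan int {
        attemptCh := make(chan int)
        go func() {
            defer close(attemptCh)
            for i := 0; i < maxRetry; i++ {
                attemptCh <- i
                // Exponential backoff: unit * 2^i, capped at maxWait.
                sleep := unit * time.Duration(1<<uint(i))
                if sleep > maxWait {
                    sleep = maxWait
                }
                // Jitter spreads out concurrent retriers.
                sleep += time.Duration(jitter * rand.Float64() * float64(sleep))
                time.Sleep(sleep)
            }
        }()
        return attemptCh
    }

    func main() {
        for attempt := range retryTimer(3, 100*time.Millisecond, time.Second, 0.5) {
            fmt.Println("attempt", attempt)
        }
    }
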
- // We explicitly disallow MakeBucket calls to not use virtual DNS style, - // since the resolution may fail. - isMakeBucket := (metadata.objectName == "" && method == "PUT" && len(metadata.queryValues) == 0) - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket - - // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, - isVirtualHost, metadata.queryValues) - if err != nil { - return nil, err - } - - // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - // Generate presign url if needed, return right here. - if metadata.expires != 0 && metadata.presignURL { - if signerType.IsAnonymous() { - return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") - } - if signerType.IsV2() { - // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) - } else if signerType.IsV4() { - // Presign URL with signature v4. - req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) - } - return req, nil - } - - // Set 'User-Agent' header for the request. - c.setUserAgent(req) - - // Set all headers. - for k, v := range metadata.customHeader { - req.Header.Set(k, v[0]) - } - - // Go net/http notoriously closes the request body. - // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. - // This can cause underlying *os.File seekers to fail, avoid that - // by making sure to wrap the closer as a nop. - if metadata.contentLength == 0 { - req.Body = nil - } else { - req.Body = ioutil.NopCloser(metadata.contentBody) - } - - // Set incoming content-length. - req.ContentLength = metadata.contentLength - if req.ContentLength <= -1 { - // For unknown content length, we upload using transfer-encoding: chunked. - req.TransferEncoding = []string{"chunked"} - } - - // set md5Sum for content protection. - if len(metadata.contentMD5Base64) > 0 { - req.Header.Set("Content-Md5", metadata.contentMD5Base64) - } - - // For anonymous requests just return. - if signerType.IsAnonymous() { - return req, nil - } - - switch { - case signerType.IsV2(): - // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. 
- req = s3signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) - default: - // Set sha256 sum for signature calculation only with signature version '4'. - shaHeader := unsignedPayload - if metadata.contentSHA256Hex != "" { - shaHeader = metadata.contentSHA256Hex - } - req.Header.Set("X-Amz-Content-Sha256", shaHeader) - - // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) - } - - // Return request. - return req, nil -} - -// set User agent. -func (c Client) setUserAgent(req *http.Request) { - req.Header.Set("User-Agent", libraryUserAgent) - if c.appInfo.appName != "" && c.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) - } -} - -// makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { - host := c.endpointURL.Host - // For Amazon S3 endpoint, try to fetch location based endpoint. - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - if c.s3AccelerateEndpoint != "" && bucketName != "" { - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - // Disable transfer acceleration for non-compliant bucket names. - if strings.Contains(bucketName, ".") { - return nil, ErrTransferAccelerationBucket(bucketName) - } - // If transfer acceleration is requested set new host. - // For more details about enabling transfer acceleration read here. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - host = c.s3AccelerateEndpoint - } else { - // Do not change the host if the endpoint URL is a FIPS S3 endpoint. - if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) - } - } - } - - // Save scheme. - scheme := c.endpointURL.Scheme - - // Strip port 80 and 443 so we won't send these ports in Host header. - // The reason is that browsers and curl automatically remove :80 and :443 - // with the generated presigned urls, then a signature mismatch error. - if h, p, err := net.SplitHostPort(host); err == nil { - if scheme == "http" && p == "80" || scheme == "https" && p == "443" { - host = h - } - } - - urlStr := scheme + "://" + host + "/" - // Make URL only if bucketName is available, otherwise use the - // endpoint URL. - if bucketName != "" { - // If endpoint supports virtual host style use that always. - // Currently only S3 and Google Cloud Storage would support - // virtual host style. - if isVirtualHostStyle { - urlStr = scheme + "://" + bucketName + "." + host + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } else { - // If not fall back to using path style. - urlStr = urlStr + bucketName + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } - } - - // If there are any query values, add them to the end. - if len(queryValues) > 0 { - urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) - } - - return url.Parse(urlStr) -} - -// returns true if virtual hosted style requests are to be used. 
-func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { - if bucketName == "" { - return false - } - - if c.lookup == BucketLookupDNS { - return true - } - if c.lookup == BucketLookupPath { - return false - } - - // default to virtual only for Amazon/Google storage. In all other cases use - // path style requests - return s3utils.IsVirtualHostSupported(url, bucketName) -} diff --git a/vendor/github.com/minio/minio-go/v6/appveyor.yml b/vendor/github.com/minio/minio-go/v6/appveyor.yml deleted file mode 100644 index 39a33d876..000000000 --- a/vendor/github.com/minio/minio-go/v6/appveyor.yml +++ /dev/null @@ -1,35 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\minio\minio-go - -# environment variables -environment: - GOPATH: c:\gopath - GO111MODULE: on - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - go get golang.org/x/lint/golint - - go get honnef.co/go/tools/cmd/staticcheck - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go vet ./... - - gofmt -s -l . - - golint -set_exit_status github.com/minio/minio-go/... - - staticcheck - - go test -short -v ./... - - go test -short -race -v ./... - -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/vendor/github.com/minio/minio-go/v6/bucket-cache.go b/vendor/github.com/minio/minio-go/v6/bucket-cache.go deleted file mode 100644 index 7ba6cbb57..000000000 --- a/vendor/github.com/minio/minio-go/v6/bucket-cache.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net" - "net/http" - "net/url" - "path" - "sync" - - "github.com/minio/minio-go/v6/pkg/credentials" - "github.com/minio/minio-go/v6/pkg/s3signer" - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// bucketLocationCache - Provides simple mechanism to hold bucket -// locations in memory. -type bucketLocationCache struct { - // mutex is used for handling the concurrent - // read/write requests for cache. - sync.RWMutex - - // items holds the cached bucket locations. - items map[string]string -} - -// newBucketLocationCache - Provides a new bucket location cache to be -// used internally with the client object. -func newBucketLocationCache() *bucketLocationCache { - return &bucketLocationCache{ - items: make(map[string]string), - } -} - -// Get - Returns a value of a given key if it exists. -func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { - r.RLock() - defer r.RUnlock() - location, ok = r.items[bucketName] - return -} - -// Set - Will persist a value into cache. 
-func (r *bucketLocationCache) Set(bucketName string, location string) { - r.Lock() - defer r.Unlock() - r.items[bucketName] = location -} - -// Delete - Deletes a bucket name from cache. -func (r *bucketLocationCache) Delete(bucketName string) { - r.Lock() - defer r.Unlock() - delete(r.items, bucketName) -} - -// GetBucketLocation - get location for the bucket name from location cache, if not -// fetch freshly by making a new request. -func (c Client) GetBucketLocation(bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - return c.getBucketLocation(bucketName) -} - -// getBucketLocation - Get location for the bucketName from location map cache, if not -// fetch freshly by making a new request. -func (c Client) getBucketLocation(bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - - // Region set then no need to fetch bucket location. - if c.region != "" { - return c.region, nil - } - - if location, ok := c.bucketLocCache.Get(bucketName); ok { - return location, nil - } - - // Initialize a new request. - req, err := c.getBucketLocationRequest(bucketName) - if err != nil { - return "", err - } - - // Initiate the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return "", err - } - location, err := processBucketLocationResponse(resp, bucketName) - if err != nil { - return "", err - } - c.bucketLocCache.Set(bucketName, location) - return location, nil -} - -// processes the getBucketLocation http response from the server. -func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { - if resp != nil { - if resp.StatusCode != http.StatusOK { - err = httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - // For access denied error, it could be an anonymous - // request. Move forward and let the top level callers - // succeed if possible based on their policy. - switch errResp.Code { - case "AuthorizationHeaderMalformed": - fallthrough - case "InvalidRegion": - fallthrough - case "AccessDenied": - if errResp.Region == "" { - return "us-east-1", nil - } - return errResp.Region, nil - } - return "", err - } - } - - // Extract location. - var locationConstraint string - err = xmlDecoder(resp.Body, &locationConstraint) - if err != nil { - return "", err - } - - location := locationConstraint - // Location is empty will be 'us-east-1'. - if location == "" { - location = "us-east-1" - } - - // Location can be 'EU' convert it to meaningful 'eu-west-1'. - if location == "EU" { - location = "eu-west-1" - } - - // Save the location into cache. - - // Return. - return location, nil -} - -// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { - // Set location query. - urlValues := make(url.Values) - urlValues.Set("location", "") - - // Set get bucket location always as path style. - targetURL := *c.endpointURL - - // as it works in makeTargetURL method from api.go file - if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { - if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { - targetURL.Host = h - } - } - - targetURL.Path = path.Join(bucketName, "") + "/" - targetURL.RawQuery = urlValues.Encode() - - // Get a new HTTP request for the method. 
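GetBucketLocation is the exported entry point for the cache above: the first call per bucket issues a GET ?location request, and later calls are served from the in-memory map. A usage sketch, assuming placeholder endpoint, keys and bucket name:

    package main

    import (
        "fmt"
        "log"

        minio "github.com/minio/minio-go/v6"
    )

    func main() {
        // Placeholders, for illustration only.
        client, err := minio.New("play.min.io", "ACCESS-KEY", "SECRET-KEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        // First call hits the server; subsequent calls for the same
        // bucket are answered from the location cache shown above.
        loc, err := client.GetBucketLocation("my-bucket")
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Println("bucket region:", loc) // e.g. "us-east-1"
    }
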
- req, err := http.NewRequest("GET", targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Set UserAgent for the request. - c.setUserAgent(req) - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if signerType.IsAnonymous() { - return req, nil - } - - if signerType.IsV2() { - // Get Bucket Location calls should be always path style - isVirtualHost := false - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - return req, nil - } - - // Set sha256 sum for signature calculation only with signature version '4'. - contentSha256 := emptySHA256Hex - if c.secure { - contentSha256 = unsignedPayload - } - - req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") - return req, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/bucket-notification.go b/vendor/github.com/minio/minio-go/v6/bucket-notification.go deleted file mode 100644 index 4714eadad..000000000 --- a/vendor/github.com/minio/minio-go/v6/bucket-notification.go +++ /dev/null @@ -1,273 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package minio
-
-import (
-	"encoding/xml"
-
-	"github.com/minio/minio-go/v6/pkg/set"
-)
-
-// NotificationEventType is an S3 notification event associated with the bucket notification configuration
type NotificationEventType string
-
-// The role of each event type is described at:
-// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
-const (
-	ObjectCreatedAll                     NotificationEventType = "s3:ObjectCreated:*"
-	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
-	ObjectCreatedPost                                          = "s3:ObjectCreated:Post"
-	ObjectCreatedCopy                                          = "s3:ObjectCreated:Copy"
-	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
-	ObjectAccessedGet                                          = "s3:ObjectAccessed:Get"
-	ObjectAccessedHead                                         = "s3:ObjectAccessed:Head"
-	ObjectAccessedAll                                          = "s3:ObjectAccessed:*"
-	ObjectRemovedAll                                           = "s3:ObjectRemoved:*"
-	ObjectRemovedDelete                                        = "s3:ObjectRemoved:Delete"
-	ObjectRemovedDeleteMarkerCreated                           = "s3:ObjectRemoved:DeleteMarkerCreated"
-	ObjectReducedRedundancyLostObject                          = "s3:ReducedRedundancyLostObject"
-)
-
-// FilterRule - child of S3Key, a tag in the notification xml which
-// carries suffix/prefix filters
-type FilterRule struct {
-	Name  string `xml:"Name"`
-	Value string `xml:"Value"`
-}
-
-// S3Key - child of Filter, a tag in the notification xml which
-// carries suffix/prefix filters
-type S3Key struct {
-	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
-}
-
-// Filter - a tag in the notification xml structure which carries
-// suffix/prefix filters
-type Filter struct {
-	S3Key S3Key `xml:"S3Key,omitempty"`
-}
-
-// Arn - holds ARN information that will be sent to the web service;
-// the ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
-type Arn struct {
-	Partition string
-	Service   string
-	Region    string
-	AccountID string
-	Resource  string
-}
-
-// NewArn creates a new ARN based on the given partition, service, region, account id and resource
-func NewArn(partition, service, region, accountID, resource string) Arn {
-	return Arn{Partition: partition,
-		Service:   service,
-		Region:    region,
-		AccountID: accountID,
-		Resource:  resource}
-}
-
-// String returns the string format of the ARN
-func (arn Arn) String() string {
-	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
-}
-
-// NotificationConfig - represents one single notification configuration
-// such as topic, queue or lambda configuration.
-type NotificationConfig struct {
-	ID     string                  `xml:"Id,omitempty"`
-	Arn    Arn                     `xml:"-"`
-	Events []NotificationEventType `xml:"Event"`
-	Filter *Filter                 `xml:"Filter,omitempty"`
-}
-
-// NewNotificationConfig creates one notification config and sets the given ARN
-func NewNotificationConfig(arn Arn) NotificationConfig {
-	return NotificationConfig{Arn: arn, Filter: &Filter{}}
-}
-
-// AddEvents adds one or more events to the current notification config
-func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
-	t.Events = append(t.Events, events...)
-} - -// AddFilterSuffix sets the suffix configuration to the current notification config -func (t *NotificationConfig) AddFilterSuffix(suffix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "suffix", Value: suffix} - // Replace any suffix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "suffix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// AddFilterPrefix sets the prefix configuration to the current notification config -func (t *NotificationConfig) AddFilterPrefix(prefix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "prefix", Value: prefix} - // Replace any prefix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "prefix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// TopicConfig carries one single topic notification configuration -type TopicConfig struct { - NotificationConfig - Topic string `xml:"Topic"` -} - -// QueueConfig carries one single queue notification configuration -type QueueConfig struct { - NotificationConfig - Queue string `xml:"Queue"` -} - -// LambdaConfig carries one single cloudfunction notification configuration -type LambdaConfig struct { - NotificationConfig - Lambda string `xml:"CloudFunction"` -} - -// BucketNotification - the struct that represents the whole XML to be sent to the web service -type BucketNotification struct { - XMLName xml.Name `xml:"NotificationConfiguration"` - LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` - TopicConfigs []TopicConfig `xml:"TopicConfiguration"` - QueueConfigs []QueueConfig `xml:"QueueConfiguration"` -} - -// AddTopic adds a given topic config to the general bucket notification config -func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { - newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} - for _, n := range b.TopicConfigs { - // If new config matches existing one - if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range topicConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) - return true -} - -// AddQueue adds a given queue config to the general bucket notification config -func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { - newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} - for _, n := range b.QueueConfigs { - if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range queueConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.QueueConfigs = append(b.QueueConfigs, 
newQueueConfig) - return true -} - -// AddLambda adds a given lambda config to the general bucket notification config -func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { - newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} - for _, n := range b.LambdaConfigs { - if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range lambdaConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) - return true -} - -// RemoveTopicByArn removes all topic configurations that match the exact specified ARN -func (b *BucketNotification) RemoveTopicByArn(arn Arn) { - var topics []TopicConfig - for _, topic := range b.TopicConfigs { - if topic.Topic != arn.String() { - topics = append(topics, topic) - } - } - b.TopicConfigs = topics -} - -// RemoveQueueByArn removes all queue configurations that match the exact specified ARN -func (b *BucketNotification) RemoveQueueByArn(arn Arn) { - var queues []QueueConfig - for _, queue := range b.QueueConfigs { - if queue.Queue != arn.String() { - queues = append(queues, queue) - } - } - b.QueueConfigs = queues -} - -// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN -func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { - var lambdas []LambdaConfig - for _, lambda := range b.LambdaConfigs { - if lambda.Lambda != arn.String() { - lambdas = append(lambdas, lambda) - } - } - b.LambdaConfigs = lambdas -} diff --git a/vendor/github.com/minio/minio-go/v6/constants.go b/vendor/github.com/minio/minio-go/v6/constants.go deleted file mode 100644 index ac472a631..000000000 --- a/vendor/github.com/minio/minio-go/v6/constants.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -/// Multipart upload defaults. - -// absMinPartSize - absolute minimum part size (5 MiB) below which -// a part in a multipart upload may not be uploaded. -const absMinPartSize = 1024 * 1024 * 5 - -// minPartSize - minimum part size 128MiB per object after which -// putObject behaves internally as multipart. -const minPartSize = 1024 * 1024 * 128 - -// maxPartsCount - maximum number of parts for a single multipart session. -const maxPartsCount = 10000 - -// maxPartSize - maximum part size 5GiB for a single multipart upload -// operation. -const maxPartSize = 1024 * 1024 * 1024 * 5 - -// maxSinglePutObjectSize - maximum size 5GiB of object per PUT -// operation. -const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 - -// maxMultipartPutObjectSize - maximum size 5TiB of object for -// Multipart operation. 
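Stepping back to the notification types removed above: together they form a small builder API. A sketch of composing a configuration with them, with placeholder ARN fields; sending the result to the server would happen via the client's SetBucketNotification, which is outside this hunk:

    package main

    import (
        "fmt"

        minio "github.com/minio/minio-go/v6"
    )

    func main() {
        // ARN of an SQS queue target; all values are placeholders.
        queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "my-queue")

        cfg := minio.NewNotificationConfig(queueArn)
        cfg.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
        cfg.AddFilterSuffix(".jpg")

        var bn minio.BucketNotification
        if ok := bn.AddQueue(cfg); !ok {
            fmt.Println("an overlapping queue configuration already exists")
        }
    }
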
-const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 - -// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when -// we don't want to sign the request payload -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// Total number of parallel workers used for multipart operation. -const totalWorkers = 4 - -// Signature related constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" -) - -// Storage class header constant. -const amzStorageClass = "X-Amz-Storage-Class" - -// Website redirect location header constant -const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" diff --git a/vendor/github.com/minio/minio-go/v6/core.go b/vendor/github.com/minio/minio-go/v6/core.go deleted file mode 100644 index 0d43ae0f5..000000000 --- a/vendor/github.com/minio/minio-go/v6/core.go +++ /dev/null @@ -1,201 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "net/http" - "strings" - - "github.com/minio/minio-go/v6/pkg/encrypt" -) - -// Core - Inherits Client and adds new methods to expose the low level S3 APIs. -type Core struct { - *Client -} - -// NewCore - Returns new initialized a Core client, this CoreClient should be -// only used under special conditions such as need to access lower primitives -// and being able to use them to write your own wrappers. -func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { - var s3Client Core - client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) - if err != nil { - return nil, err - } - s3Client.Client = client - return &s3Client, nil -} - -// ListObjects - List all the objects at a prefix, optionally with marker and delimiter -// you can further filter the results. -func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) -} - -// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses -// continuationToken instead of marker to support iteration over the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { - return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys, startAfter) -} - -// CopyObjectWithContext - copies an object from source object to destination object on server side. 
-func (c Core) CopyObjectWithContext(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { - return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata) -} - -// CopyObject - copies an object from source object to destination object on server side. -func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { - return c.CopyObjectWithContext(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) -} - -// CopyObjectPartWithContext - creates a part in a multipart upload by copying (a -// part of) an existing object. -func (c Core) CopyObjectPartWithContext(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - - return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, - partID, startOffset, length, metadata) -} - -// CopyObjectPart - creates a part in a multipart upload by copying (a -// part of) an existing object. -func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - - return c.CopyObjectPartWithContext(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID, - partID, startOffset, length, metadata) -} - -// PutObjectWithContext - Upload object. Uploads using single PUT call. -func (c Core) PutObjectWithContext(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { - opts := PutObjectOptions{} - m := make(map[string]string) - for k, v := range metadata { - if strings.ToLower(k) == "content-encoding" { - opts.ContentEncoding = v - } else if strings.ToLower(k) == "content-disposition" { - opts.ContentDisposition = v - } else if strings.ToLower(k) == "content-language" { - opts.ContentLanguage = v - } else if strings.ToLower(k) == "content-type" { - opts.ContentType = v - } else if strings.ToLower(k) == "cache-control" { - opts.CacheControl = v - } else if strings.EqualFold(k, amzWebsiteRedirectLocation) { - opts.WebsiteRedirectLocation = v - } else { - m[k] = metadata[k] - } - } - opts.UserMetadata = m - opts.ServerSideEncryption = sse - return c.putObjectDo(ctx, bucket, object, data, md5Base64, sha256Hex, size, opts) -} - -// PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { - return c.PutObjectWithContext(context.Background(), bucket, object, data, size, md5Base64, sha256Hex, metadata, sse) -} - -// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. -func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) - return result.UploadID, err -} - -// ListMultipartUploads - List incomplete uploads. 
-func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
-	return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
-}
-
-// PutObjectPartWithContext - Upload an object part.
-func (c Core) PutObjectPartWithContext(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
-	return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
-}
-
-// PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
-	return c.PutObjectPartWithContext(context.Background(), bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, sse)
-}
-
-// ListObjectParts - List uploaded parts of an incomplete upload.
-func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
-	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
-}
-
-// CompleteMultipartUploadWithContext - Concatenate uploaded parts and commit to an object.
-func (c Core) CompleteMultipartUploadWithContext(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
-	res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
-		Parts: parts,
-	})
-	return res.ETag, err
-}
-
-// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
-func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) {
-	return c.CompleteMultipartUploadWithContext(context.Background(), bucket, object, uploadID, parts)
-}
-
-// AbortMultipartUploadWithContext - Abort an incomplete upload.
-func (c Core) AbortMultipartUploadWithContext(ctx context.Context, bucket, object, uploadID string) error {
-	return c.abortMultipartUpload(ctx, bucket, object, uploadID)
-}
-
-// AbortMultipartUpload - Abort an incomplete upload.
-func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
-	return c.AbortMultipartUploadWithContext(context.Background(), bucket, object, uploadID)
-}
-
-// GetBucketPolicy - fetches the bucket access policy for a given bucket.
-func (c Core) GetBucketPolicy(bucket string) (string, error) {
-	return c.getBucketPolicy(bucket)
-}
-
-// PutBucketPolicy - applies a new bucket access policy for a given bucket.
-func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error {
-	return c.putBucketPolicy(bucket, bucketPolicy)
-}
-
-// GetObjectWithContext is a lower level API implemented to support reading
-// partial objects and also downloading objects with special conditions
-// matching etag, modtime etc.
-func (c Core) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
-	return c.getObject(ctx, bucketName, objectName, opts)
-}
-
-// GetObject is a lower level API implemented to support reading
-// partial objects and also downloading objects with special conditions
-// matching etag, modtime etc.
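Together, NewMultipartUpload, PutObjectPart and CompleteMultipartUpload above give the low-level multipart flow the Core client exposes. A sketch of the happy path, with placeholder endpoint, keys, bucket and object names, and error handling trimmed to the essentials:

    package main

    import (
        "bytes"
        "log"

        minio "github.com/minio/minio-go/v6"
    )

    func main() {
        core, err := minio.NewCore("play.min.io", "ACCESS-KEY", "SECRET-KEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        uploadID, err := core.NewMultipartUpload("my-bucket", "my-object", minio.PutObjectOptions{})
        if err != nil {
            log.Fatalln(err)
        }

        data := bytes.Repeat([]byte("x"), 5*1024*1024) // one 5 MiB part
        part, err := core.PutObjectPart("my-bucket", "my-object", uploadID, 1,
            bytes.NewReader(data), int64(len(data)), "", "", nil)
        if err != nil {
            // Clean up the incomplete upload on failure.
            core.AbortMultipartUpload("my-bucket", "my-object", uploadID)
            log.Fatalln(err)
        }

        _, err = core.CompleteMultipartUpload("my-bucket", "my-object", uploadID,
            []minio.CompletePart{{PartNumber: 1, ETag: part.ETag}})
        if err != nil {
            log.Fatalln(err)
        }
    }
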
-func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - return c.GetObjectWithContext(context.Background(), bucketName, objectName, opts) -} - -// StatObjectWithContext is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObjectWithContext(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(ctx, bucketName, objectName, opts) -} - -// StatObject is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.StatObjectWithContext(context.Background(), bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v6/go.mod b/vendor/github.com/minio/minio-go/v6/go.mod deleted file mode 100644 index 2dc189d1a..000000000 --- a/vendor/github.com/minio/minio-go/v6/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module github.com/minio/minio-go/v6 - -go 1.12 - -require ( - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/minio/sha256-simd v0.1.1 - github.com/mitchellh/go-homedir v1.1.0 - github.com/sirupsen/logrus v1.4.2 // indirect - github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect - golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f - golang.org/x/net v0.0.0-20190522155817-f3200d17e092 - gopkg.in/ini.v1 v1.42.0 -) diff --git a/vendor/github.com/minio/minio-go/v6/go.sum b/vendor/github.com/minio/minio-go/v6/go.sum deleted file mode 100644 index cd02277ed..000000000 --- a/vendor/github.com/minio/minio-go/v6/go.sum +++ /dev/null @@ -1,41 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions 
v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/vendor/github.com/minio/minio-go/v6/hook-reader.go b/vendor/github.com/minio/minio-go/v6/hook-reader.go deleted file mode 100644 index f251c1e95..000000000 --- a/vendor/github.com/minio/minio-go/v6/hook-reader.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "io" -) - -// hookReader hooks additional reader in the source stream. It is -// useful for making progress bars. Second reader is appropriately -// notified about the exact number of bytes read from the primary -// source on each Read operation. 
-type hookReader struct { - source io.Reader - hook io.Reader -} - -// Seek implements io.Seeker. Seeks source first, and if necessary -// seeks hook if Seek method is appropriately found. -func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { - // Verify for source has embedded Seeker, use it. - sourceSeeker, ok := hr.source.(io.Seeker) - if ok { - n, err = sourceSeeker.Seek(offset, whence) - if err != nil { - return 0, err - } - } - - // Verify if hook has embedded Seeker, use it. - hookSeeker, ok := hr.hook.(io.Seeker) - if ok { - var m int64 - m, err = hookSeeker.Seek(offset, whence) - if err != nil { - return 0, err - } - if n != m { - return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) - } - } - return n, nil -} - -// Read implements io.Reader. Always reads from the source, the return -// value 'n' number of bytes are reported through the hook. Returns -// error for all non io.EOF conditions. -func (hr *hookReader) Read(b []byte) (n int, err error) { - n, err = hr.source.Read(b) - if err != nil && err != io.EOF { - return n, err - } - // Progress the hook with the total read bytes from the source. - if _, herr := hr.hook.Read(b[:n]); herr != nil { - if herr != io.EOF { - return n, herr - } - } - return n, err -} - -// newHook returns a io.ReadSeeker which implements hookReader that -// reports the data read from the source to the hook. -func newHook(source, hook io.Reader) io.Reader { - if hook == nil { - return source - } - return &hookReader{source, hook} -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go deleted file mode 100644 index 6dc8e9d05..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Chain will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The Chain provides a way of chaining multiple providers together -// which will pick the first available using priority order of the -// Providers in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the no credentials value. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again after IsExpired() is true. -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. 
-//     mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
-//     if err != nil {
-//         log.Fatalln(err)
-//     }
-//
-type Chain struct {
-	Providers []Provider
-	curr      Provider
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
-	return New(&Chain{
-		Providers: append([]Provider{}, providers...),
-	})
-}
-
-// Retrieve returns the credentials value, or no credentials (anonymous)
-// if no credentials provider returned any value.
-//
-// If a provider is found with credentials, it will be cached and any calls
-// to IsExpired() will return the expired state of the cached provider.
-func (c *Chain) Retrieve() (Value, error) {
-	for _, p := range c.Providers {
-		creds, _ := p.Retrieve()
-		// Always prioritize non-anonymous providers, if any.
-		if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
-			continue
-		}
-		c.curr = p
-		return creds, nil
-	}
-	// At this point we have exhausted all the providers and
-	// are left without any credentials; return anonymous.
-	return Value{
-		SignerType: SignatureAnonymous,
-	}, nil
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *Chain) IsExpired() bool {
-	if c.curr != nil {
-		return c.curr.IsExpired()
-	}
-
-	return true
-}
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample
deleted file mode 100644
index d793c9e0e..000000000
--- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-	"version": "8",
-	"hosts": {
-		"play": {
-			"url": "https://play.min.io",
-			"accessKey": "Q3AM3UQ867SPQQA43P2F",
-			"secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
-			"api": "S3v2"
-		},
-		"s3": {
-			"url": "https://s3.amazonaws.com",
-			"accessKey": "accessKey",
-			"secretKey": "secret",
-			"api": "S3v4"
-		}
-	}
-}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go
deleted file mode 100644
index 1a48751b5..000000000
--- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import (
-	"sync"
-	"time"
-)
-
-// A Value is the AWS credentials value for individual credential fields.
-type Value struct {
-	// AWS Access key ID
-	AccessKeyID string
-
-	// AWS Secret Access Key
-	SecretAccessKey string
-
-	// AWS Session Token
-	SessionToken string
-
-	// Signature Type.
-	SignerType SignatureType
-}
-
-// A Provider is the interface for any component which will provide credentials
-// Value.
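The Chain provider above picks the first provider that returns non-anonymous credentials. A usage sketch mirroring its doc comment, using the EnvAWS and EnvMinio providers also removed by this patch (endpoint and region below are placeholders):

    package main

    import (
        "log"

        minio "github.com/minio/minio-go/v6"
        "github.com/minio/minio-go/v6/pkg/credentials"
    )

    func main() {
        // Try AWS-style environment variables first, then MinIO-style;
        // fall back to anonymous access if neither is set.
        creds := credentials.NewChainCredentials([]credentials.Provider{
            &credentials.EnvAWS{},
            &credentials.EnvMinio{},
        })

        client, err := minio.NewWithCredentials("play.min.io", creds, true, "us-east-1")
        if err != nil {
            log.Fatalln(err)
        }
        _ = client
    }
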
-// A provider is required to manage its own Expired state, and what
-// being expired means.
-type Provider interface {
-	// Retrieve returns nil if it successfully retrieved the value.
-	// An error is returned if the value was not obtainable, or is empty.
-	Retrieve() (Value, error)
-
-	// IsExpired returns true if the credentials are no longer valid, and need
-	// to be retrieved.
-	IsExpired() bool
-}
-
-// An Expiry provides shared expiration logic to be used by credentials
-// providers to implement expiry functionality.
-//
-// The best method to use this struct is as an anonymous field within the
-// provider's struct.
-//
-// Example:
-//     type IAMCredentialProvider struct {
-//         Expiry
-//         ...
-//     }
-type Expiry struct {
-	// The date/time when the credentials expire.
-	expiration time.Time
-
-	// If set will be used by IsExpired to determine the current time.
-	// Defaults to time.Now if CurrentTime is not set.
-	CurrentTime func() time.Time
-}
-
-// SetExpiration sets the expiration that IsExpired will check when called.
-//
-// If window is greater than 0 the expiration time will be reduced by the
-// window value.
-//
-// Using a window is helpful to trigger credentials to expire sooner than
-// the expiration time given to ensure no requests are made with expired
-// tokens.
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
-	e.expiration = expiration
-	if window > 0 {
-		e.expiration = e.expiration.Add(-window)
-	}
-}
-
-// IsExpired returns true if the credentials are expired.
-func (e *Expiry) IsExpired() bool {
-	if e.CurrentTime == nil {
-		e.CurrentTime = time.Now
-	}
-	return e.expiration.Before(e.CurrentTime())
-}
-
-// Credentials - A container for concurrency-safe retrieval of the credentials Value.
-// Credentials will cache the credentials value until they expire. Once the value
-// expires the next Get will attempt to retrieve valid credentials.
-//
-// Credentials is safe to use across multiple goroutines and will manage the
-// synchronization state so the Providers do not need to implement their own
-// synchronization.
-//
-// The first Credentials.Get() will always call Provider.Retrieve() to get the
-// first instance of the credentials Value. All calls to Get() after that
-// will return the cached credentials Value until IsExpired() returns true.
-type Credentials struct {
-	sync.Mutex
-
-	creds        Value
-	forceRefresh bool
-	provider     Provider
-}
-
-// New returns a pointer to a new Credentials with the provider set.
-func New(provider Provider) *Credentials {
-	return &Credentials{
-		provider:     provider,
-		forceRefresh: true,
-	}
-}
-
-// Get returns the credentials value, or an error if the credentials Value failed
-// to be retrieved.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
-	c.Lock()
-	defer c.Unlock()
-
-	if c.isExpired() {
-		creds, err := c.provider.Retrieve()
-		if err != nil {
-			return Value{}, err
-		}
-		c.creds = creds
-		c.forceRefresh = false
-	}
-
-	return c.creds, nil
-}
-
-// Expire expires the credentials and forces them to be retrieved on the
-// next call to Get().
-//
-// This will override the Provider's expired state, and force Credentials
-// to call the Provider's Retrieve().
-func (c *Credentials) Expire() { - c.Lock() - defer c.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be refreshed. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.Lock() - defer c.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample deleted file mode 100644 index 7fc91d9d2..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go deleted file mode 100644 index 0c94477b7..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package credentials provides credential retrieval and management -// for S3 compatible object storage. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. 
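To make the Get/Expire caching contract above concrete, a small sketch with an invented countingProvider that records how often Retrieve is actually called:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

// countingProvider is an invented Provider that counts Retrieve calls,
// purely to make the caching visible.
type countingProvider struct {
	calls int
}

func (p *countingProvider) Retrieve() (credentials.Value, error) {
	p.calls++
	return credentials.Value{
		AccessKeyID:     "AKIDEXAMPLE",
		SecretAccessKey: "secret",
		SignerType:      credentials.SignatureV4,
	}, nil
}

// IsExpired reports false once something has been retrieved, so Get
// serves from the cache afterwards.
func (p *countingProvider) IsExpired() bool { return p.calls == 0 }

func main() {
	p := &countingProvider{}
	creds := credentials.New(p)

	_, _ = creds.Get() // first Get always calls Retrieve
	_, _ = creds.Get() // cached; Retrieve is not called again
	creds.Expire()     // force a refresh
	_, _ = creds.Get()
	fmt.Println(p.calls) // 2
}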
-//
-// type MyProvider struct{}
-// func (m *MyProvider) Retrieve() (Value, error) {...}
-// func (m *MyProvider) IsExpired() bool {...}
-//
-// creds := NewCredentials(&MyProvider{})
-// credValue, err := creds.Get()
-//
-package credentials
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go
deleted file mode 100644
index b6e60d0e1..000000000
--- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import "os"
-
-// An EnvAWS retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
-// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
-// * Secret Token: AWS_SESSION_TOKEN.
-type EnvAWS struct {
-	retrieved bool
-}
-
-// NewEnvAWS returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvAWS() *Credentials {
-	return New(&EnvAWS{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvAWS) Retrieve() (Value, error) {
-	e.retrieved = false
-
-	id := os.Getenv("AWS_ACCESS_KEY_ID")
-	if id == "" {
-		id = os.Getenv("AWS_ACCESS_KEY")
-	}
-
-	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
-	if secret == "" {
-		secret = os.Getenv("AWS_SECRET_KEY")
-	}
-
-	signerType := SignatureV4
-	if id == "" || secret == "" {
-		signerType = SignatureAnonymous
-	}
-
-	e.retrieved = true
-	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
-		SignerType:      signerType,
-	}, nil
-}
-
-// IsExpired returns if the credentials have been retrieved.
-func (e *EnvAWS) IsExpired() bool {
-	return !e.retrieved
-}
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go
deleted file mode 100644
index 5f1ae0d25..000000000
--- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import "os"
-
-// An EnvMinio retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID: MINIO_ACCESS_KEY.
-// * Secret Access Key: MINIO_SECRET_KEY.
-type EnvMinio struct {
-	retrieved bool
-}
-
-// NewEnvMinio returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvMinio() *Credentials {
-	return New(&EnvMinio{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvMinio) Retrieve() (Value, error) {
-	e.retrieved = false
-
-	id := os.Getenv("MINIO_ACCESS_KEY")
-	secret := os.Getenv("MINIO_SECRET_KEY")
-
-	signerType := SignatureV4
-	if id == "" || secret == "" {
-		signerType = SignatureAnonymous
-	}
-
-	e.retrieved = true
-	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SignerType:      signerType,
-	}, nil
-}
-
-// IsExpired returns if the credentials have been retrieved.
-func (e *EnvMinio) IsExpired() bool {
-	return !e.retrieved
-}
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go
deleted file mode 100644
index ff07bc55b..000000000
--- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import (
-	"os"
-	"path/filepath"
-
-	homedir "github.com/mitchellh/go-homedir"
-	ini "gopkg.in/ini.v1"
-)
-
-// A FileAWSCredentials retrieves credentials from the current user's home
-// directory, and keeps track if those credentials are expired.
-//
-// Profile ini file example: $HOME/.aws/credentials
-type FileAWSCredentials struct {
-	// Path to the shared credentials file.
-	//
-	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
-	// env value is empty will default to current user's home directory.
-	// Linux/OSX: "$HOME/.aws/credentials"
-	// Windows: "%USERPROFILE%\.aws\credentials"
-	filename string
-
-	// AWS Profile to extract credentials from the shared credentials file. If empty
-	// will default to environment variable "AWS_PROFILE" or "default" if
-	// environment variable is also not set.
-	profile string
-
-	// retrieved states if the credentials have been successfully retrieved.
-	retrieved bool
-}
-
-// NewFileAWSCredentials returns a pointer to a new Credentials object
-// wrapping the Profile file provider.
-func NewFileAWSCredentials(filename string, profile string) *Credentials {
-	return New(&FileAWSCredentials{
-		filename: filename,
-		profile:  profile,
-	})
-}
-
-// Retrieve reads and extracts the shared credentials from the current
-// user's home directory.
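A usage sketch for the two environment providers deleted above; the key values are placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")

	v, err := credentials.NewEnvAWS().Get()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v.AccessKeyID, v.SignerType) // AKIDEXAMPLE S3v4

	// With MINIO_ACCESS_KEY/MINIO_SECRET_KEY unset, EnvMinio degrades
	// to anonymous credentials rather than failing.
	m, _ := credentials.NewEnvMinio().Get()
	fmt.Println(m.SignerType.IsAnonymous())
}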
-func (p *FileAWSCredentials) Retrieve() (Value, error) { - if p.filename == "" { - p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") - if p.filename == "" { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.filename = filepath.Join(homeDir, ".aws", "credentials") - } - } - if p.profile == "" { - p.profile = os.Getenv("AWS_PROFILE") - if p.profile == "" { - p.profile = "default" - } - } - - p.retrieved = false - - iniProfile, err := loadProfile(p.filename, p.profile) - if err != nil { - return Value{}, err - } - - // Default to empty string if not found. - id := iniProfile.Key("aws_access_key_id") - // Default to empty string if not found. - secret := iniProfile.Key("aws_secret_access_key") - // Default to empty string if not found. - token := iniProfile.Key("aws_session_token") - - p.retrieved = true - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - SignerType: SignatureV4, - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. -func loadProfile(filename, profile string) (*ini.Section, error) { - config, err := ini.Load(filename) - if err != nil { - return nil, err - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return nil, err - } - return iniProfile, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go deleted file mode 100644 index 117ceb6ef..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - homedir "github.com/mitchellh/go-homedir" -) - -// A FileMinioClient retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Configuration file example: $HOME/.mc/config.json -type FileMinioClient struct { - // Path to the shared credentials file. - // - // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.mc/config.json" - // Windows: "%USERALIAS%\mc\config.json" - filename string - - // MinIO Alias to extract credentials from the shared credentials file. If empty - // will default to environment variable "MINIO_ALIAS" or "default" if - // environment variable is also not set. 
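A sketch of the fallback behaviour coded into FileAWSCredentials.Retrieve above, assuming a standard shared-credentials setup:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Empty arguments trigger the fallbacks in Retrieve:
	// AWS_SHARED_CREDENTIALS_FILE / $HOME/.aws/credentials for the
	// file, AWS_PROFILE / "default" for the profile.
	v, err := credentials.NewFileAWSCredentials("", "").Get()
	if err != nil {
		fmt.Println("no shared credentials file:", err)
		return
	}
	fmt.Println(v.AccessKeyID)
}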
- alias string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileMinioClient returns a pointer to a new Credentials object -// wrapping the Alias file provider. -func NewFileMinioClient(filename string, alias string) *Credentials { - return New(&FileMinioClient{ - filename: filename, - alias: alias, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *FileMinioClient) Retrieve() (Value, error) { - if p.filename == "" { - if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { - p.filename = value - } else { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.filename = filepath.Join(homeDir, ".mc", "config.json") - if runtime.GOOS == "windows" { - p.filename = filepath.Join(homeDir, "mc", "config.json") - } - } - } - - if p.alias == "" { - p.alias = os.Getenv("MINIO_ALIAS") - if p.alias == "" { - p.alias = "s3" - } - } - - p.retrieved = false - - hostCfg, err := loadAlias(p.filename, p.alias) - if err != nil { - return Value{}, err - } - - p.retrieved = true - return Value{ - AccessKeyID: hostCfg.AccessKey, - SecretAccessKey: hostCfg.SecretKey, - SignerType: parseSignatureType(hostCfg.API), - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileMinioClient) IsExpired() bool { - return !p.retrieved -} - -// hostConfig configuration of a host. -type hostConfig struct { - URL string `json:"url"` - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - API string `json:"api"` -} - -// config config version. -type config struct { - Version string `json:"version"` - Hosts map[string]hostConfig `json:"hosts"` -} - -// loadAliass loads from the file pointed to by shared credentials filename for alias. -// The credentials retrieved from the alias will be returned or error. Error will be -// returned if it fails to read from the file. -func loadAlias(filename, alias string) (hostConfig, error) { - cfg := &config{} - configBytes, err := ioutil.ReadFile(filename) - if err != nil { - return hostConfig{}, err - } - if err = json.Unmarshal(configBytes, cfg); err != nil { - return hostConfig{}, err - } - return cfg.Hosts[alias], nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go deleted file mode 100644 index 5732f2e4b..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bufio" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path" - "time" -) - -// DefaultExpiryWindow - Default expiry window. -// ExpiryWindow will allow the credentials to trigger refreshing -// prior to the credentials actually expiring. 
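Correspondingly for FileMinioClient, a sketch that resolves the "play" alias from the config.json.sample shown earlier in this patch:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Reads MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json and
	// selects the "play" alias from the hosts map.
	v, err := credentials.NewFileMinioClient("", "play").Get()
	if err != nil {
		fmt.Println("no mc config:", err)
		return
	}
	fmt.Println(v.AccessKeyID, v.SignerType)
}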
This is beneficial -// so race conditions with expiring credentials do not cause -// request to fail unexpectedly due to ExpiredTokenException exceptions. -const DefaultExpiryWindow = time.Second * 10 // 10 secs - -// A IAM retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -type IAM struct { - Expiry - - // Required http Client to use when connecting to IAM metadata service. - Client *http.Client - - // Custom endpoint to fetch IAM role credentials. - endpoint string -} - -// IAM Roles for Amazon EC2 -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -const ( - defaultIAMRoleEndpoint = "http://169.254.169.254" - defaultECSRoleEndpoint = "http://169.254.170.2" - defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" -) - -// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html -func getEndpoint(endpoint string) (string, bool) { - if endpoint != "" { - return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" - } - if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { - return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true - } - return defaultIAMRoleEndpoint, false -} - -// NewIAM returns a pointer to a new Credentials object wrapping the IAM. -func NewIAM(endpoint string) *Credentials { - p := &IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - endpoint: endpoint, - } - return New(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired -func (m *IAM) Retrieve() (Value, error) { - endpoint, isEcsTask := getEndpoint(m.endpoint) - var roleCreds ec2RoleCredRespBody - var err error - if isEcsTask { - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) - } else { - roleCreds, err = getCredentials(m.Client, endpoint) - } - if err != nil { - return Value{}, err - } - // Expiry window is set to 10secs. - m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - SignerType: SignatureV4, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string - - // Unused params. - LastUpdated time.Time - Type string -} - -// Get the final IAM role URL where the request will -// be sent to fetch the rolling access credentials. -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func getIAMRoleURL(endpoint string) (*url.URL, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - u.Path = defaultIAMSecurityCredsPath - return u, nil -} - -// listRoleNames lists of credential role names associated -// with the current EC2 service. If there are no credentials, -// or there is an error making or receiving the request. 
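A sketch of consuming the IAM provider above; the bounded http.Client is an addition for illustration, everything else mirrors what NewIAM constructs:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Equivalent to NewIAM("") but with a client timeout so the call
	// fails fast off EC2/ECS. The empty endpoint lets getEndpoint pick
	// the ECS task endpoint when AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
	// is set, else the EC2 instance-metadata endpoint.
	creds := credentials.New(&credentials.IAM{
		Client: &http.Client{Timeout: 5 * time.Second},
	})
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no instance/task role:", err)
		return
	}
	fmt.Println(v.AccessKeyID, v.SessionToken != "")
}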
-// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errors.New(resp.Status) - } - - credsList := []string{} - s := bufio.NewScanner(resp.Body) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, err - } - - return credsList, nil -} - -func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - return respCreds, nil -} - -// getCredentials - obtains the credentials from the IAM role name associated with -// the current EC2 service. -// -// If the credentials cannot be found, or there is an error -// reading the response an error will be returned. -func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - u, err := getIAMRoleURL(endpoint) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - roleNames, err := listRoleNames(client, u) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if len(roleNames) == 0 { - return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // - An instance profile can contain only one IAM role. This limit cannot be increased. - roleName := roleNames[0] - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // The following command retrieves the security credentials for an - // IAM role named `s3access`. - // - // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access - // - u.Path = path.Join(u.Path, roleName) - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. 
- return ec2RoleCredRespBody{}, errors.New(respCreds.Message) - } - - return respCreds, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go deleted file mode 100644 index b79433305..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "strings" - -// SignatureType is type of Authorization requested for a given HTTP request. -type SignatureType int - -// Different types of supported signatures - default is SignatureV4 or SignatureDefault. -const ( - // SignatureDefault is always set to v4. - SignatureDefault SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming - SignatureAnonymous // Anonymous signature signifies, no signature. -) - -// IsV2 - is signature SignatureV2? -func (s SignatureType) IsV2() bool { - return s == SignatureV2 -} - -// IsV4 - is signature SignatureV4? -func (s SignatureType) IsV4() bool { - return s == SignatureV4 || s == SignatureDefault -} - -// IsStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) IsStreamingV4() bool { - return s == SignatureV4Streaming -} - -// IsAnonymous - is signature empty? -func (s SignatureType) IsAnonymous() bool { - return s == SignatureAnonymous -} - -// Stringer humanized version of signature type, -// strings returned here are case insensitive. -func (s SignatureType) String() string { - if s.IsV2() { - return "S3v2" - } else if s.IsV4() { - return "S3v4" - } else if s.IsStreamingV4() { - return "S3v4Streaming" - } - return "Anonymous" -} - -func parseSignatureType(str string) SignatureType { - if strings.EqualFold(str, "S3v4") { - return SignatureV4 - } else if strings.EqualFold(str, "S3v2") { - return SignatureV2 - } else if strings.EqualFold(str, "S3v4Streaming") { - return SignatureV4Streaming - } - return SignatureAnonymous -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go deleted file mode 100644 index 7dde00b0a..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
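A short sketch of the exported SignatureType surface removed above; parseSignatureType itself is unexported, so only the String direction is shown:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// String emits the same case-insensitive names that
	// parseSignatureType accepts ("S3v2", "S3v4", "S3v4Streaming");
	// anything else parses as anonymous.
	for _, s := range []credentials.SignatureType{
		credentials.SignatureV2,
		credentials.SignatureV4,
		credentials.SignatureV4Streaming,
		credentials.SignatureAnonymous,
	} {
		fmt.Printf("%-14s v2=%v v4=%v\n", s, s.IsV2(), s.IsV4())
	}
}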
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Static is a set of credentials which are set programmatically, -// and will never expire. -type Static struct { - Value -} - -// NewStaticV2 returns a pointer to a new Credentials object -// wrapping a static credentials value provider, signature is -// set to v2. If access and secret are not specified then -// regardless of signature type set it Value will return -// as anonymous. -func NewStaticV2(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV2) -} - -// NewStaticV4 is similar to NewStaticV2 with similar considerations. -func NewStaticV4(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV4) -} - -// NewStatic returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { - return New(&Static{ - Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - SignerType: signerType, - }, - }) -} - -// Retrieve returns the static credentials. -func (s *Static) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - // Anonymous is not an error - return Value{SignerType: SignatureAnonymous}, nil - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For Static, the credentials never expired. -func (s *Static) IsExpired() bool { - return false -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go deleted file mode 100644 index 03134c3d2..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// AssumedRoleUser - The identifiers for the temporary security credentials that -// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - Arn string - AssumedRoleID string `xml:"AssumeRoleId"` -} - -// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. 
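A sketch of the Static provider above, including the anonymous fallback for empty keys:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	v, _ := credentials.NewStaticV4("AKIDEXAMPLE", "secret", "").Get()
	fmt.Println(v.SignerType) // S3v4

	// Empty keys are not an error; Retrieve returns an anonymous
	// Value regardless of the requested signature type.
	anon, _ := credentials.NewStaticV4("", "", "").Get()
	fmt.Println(anon.SignerType.IsAnonymous()) // true
}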
-type AssumeRoleWithClientGrantsResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` - Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants -// request, including temporary credentials that can be used to make MinIO API requests. -type ClientGrantsResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromClientGrantsToken string `xml:",omitempty"` -} - -// ClientGrantsToken - client grants token with expiry. -type ClientGrantsToken struct { - Token string - Expiry int -} - -// A STSClientGrants retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSClientGrants struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // MinIO endpoint to fetch STS credentials. - stsEndpoint string - - // getClientGrantsTokenExpiry function to retrieve tokens - // from IDP This function should return two values one is - // accessToken which is a self contained access token (JWT) - // and second return value is the expiry associated with - // this token. This is a customer provided function and - // is mandatory. - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error) -} - -// NewSTSClientGrants returns a pointer to a new -// Credentials object wrapping the STSClientGrants. 
-func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getClientGrantsTokenExpiry == nil { - return nil, errors.New("Client grants access token and expiry retrieval function should be defined") - } - return New(&STSClientGrants{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - stsEndpoint: stsEndpoint, - getClientGrantsTokenExpiry: getClientGrantsTokenExpiry, - }), nil -} - -func getClientGrantsCredentials(clnt *http.Client, endpoint string, - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { - - accessToken, err := getClientGrantsTokenExpiry() - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithClientGrants") - v.Set("Token", accessToken.Token) - v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) - v.Set("Version", "2011-06-15") - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - u.RawQuery = v.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithClientGrantsResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSClientGrants) Retrieve() (Value, error) { - a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_ldap_identity.go deleted file mode 100644 index b72ac061c..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_ldap_identity.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
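A sketch of wiring up STSClientGrants as removed above; the endpoint and JWT are placeholders for values obtained from your deployment and IDP:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// The callback must return a fresh client-grants access token
	// (JWT) together with its validity in seconds.
	creds, err := credentials.NewSTSClientGrants("https://minio.example.com:9000",
		func() (*credentials.ClientGrantsToken, error) {
			return &credentials.ClientGrantsToken{Token: "<jwt-from-idp>", Expiry: 900}, nil
		})
	if err != nil {
		log.Fatalln(err)
	}
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID, v.SessionToken != "")
}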
- */ - -package credentials - -import ( - "encoding/xml" - "errors" - "net/http" - "net/url" - "time" -) - -// AssumeRoleWithLDAPResponse contains the result of successful -// AssumeRoleWithLDAPIdentity request -type AssumeRoleWithLDAPResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` - Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// LDAPIdentityResult - contains credentials for a successful -// AssumeRoleWithLDAPIdentity request. -type LDAPIdentityResult struct { - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - - SubjectFromToken string `xml:",omitempty"` -} - -// LDAPIdentity retrieves credentials from MinIO -type LDAPIdentity struct { - Expiry - - stsEndpoint string - - ldapUsername, ldapPassword string -} - -// NewLDAPIdentity returns new credentials object that uses LDAP -// Identity. -func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) { - return New(&LDAPIdentity{ - stsEndpoint: stsEndpoint, - ldapUsername: ldapUsername, - ldapPassword: ldapPassword, - }), nil -} - -// Retrieve gets the credential by calling the MinIO STS API for -// LDAP on the configured stsEndpoint. -func (k *LDAPIdentity) Retrieve() (value Value, err error) { - u, kerr := url.Parse(k.stsEndpoint) - if kerr != nil { - err = kerr - return - } - - clnt := &http.Client{Transport: http.DefaultTransport} - v := url.Values{} - v.Set("Action", "AssumeRoleWithLDAPIdentity") - v.Set("Version", "2011-06-15") - v.Set("LDAPUsername", k.ldapUsername) - v.Set("LDAPPassword", k.ldapPassword) - - u.RawQuery = v.Encode() - - req, kerr := http.NewRequest("POST", u.String(), nil) - if kerr != nil { - err = kerr - return - } - - resp, kerr := clnt.Do(req) - if kerr != nil { - err = kerr - return - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - err = errors.New(resp.Status) - return - } - - r := AssumeRoleWithLDAPResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return - } - - cr := r.Result.Credentials - k.SetExpiration(cr.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: cr.AccessKey, - SecretAccessKey: cr.SecretKey, - SessionToken: cr.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go deleted file mode 100644 index 4d53bd236..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
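A sketch of the LDAP STS provider above; all three arguments are placeholders, and the STS endpoint is the MinIO server itself:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	creds, err := credentials.NewLDAPIdentity("https://minio.example.com:9000",
		"ldap-user", "ldap-password")
	if err != nil {
		log.Fatalln(err)
	}
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID, v.SessionToken != "")
}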
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. -type AssumeRoleWithWebIdentityResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` - Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity -// request, including temporary credentials that can be used to make MinIO API requests. -type WebIdentityResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromWebIdentityToken string `xml:",omitempty"` -} - -// WebIdentityToken - web identity token with expiry. -type WebIdentityToken struct { - Token string - Expiry int -} - -// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSWebIdentity struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // MinIO endpoint to fetch STS credentials. - stsEndpoint string - - // getWebIDTokenExpiry function which returns ID tokens - // from IDP. This function should return two values one - // is ID token which is a self contained ID token (JWT) - // and second return value is the expiry associated with - // this token. - // This is a customer provided function and is mandatory. - getWebIDTokenExpiry func() (*WebIdentityToken, error) -} - -// NewSTSWebIdentity returns a pointer to a new -// Credentials object wrapping the STSWebIdentity. 
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getWebIDTokenExpiry == nil { - return nil, errors.New("Web ID token and expiry retrieval function should be defined") - } - return New(&STSWebIdentity{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - stsEndpoint: stsEndpoint, - getWebIDTokenExpiry: getWebIDTokenExpiry, - }), nil -} - -func getWebIdentityCredentials(clnt *http.Client, endpoint string, - getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { - idToken, err := getWebIDTokenExpiry() - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithWebIdentity") - v.Set("WebIdentityToken", idToken.Token) - v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) - v.Set("Version", "2011-06-15") - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - u.RawQuery = v.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithWebIdentityResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go deleted file mode 100644 index ac0b69a02..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go +++ /dev/null @@ -1,195 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "net/http" - - "golang.org/x/crypto/argon2" -) - -const ( - // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. 
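A sketch of the web-identity STS provider above; the endpoint and OIDC token are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	creds, err := credentials.NewSTSWebIdentity("https://minio.example.com:9000",
		func() (*credentials.WebIdentityToken, error) {
			// Placeholder: return the OIDC ID token from your IDP and
			// its validity in seconds.
			return &credentials.WebIdentityToken{Token: "<oidc-id-token>", Expiry: 3600}, nil
		})
	if err != nil {
		log.Fatalln(err)
	}
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.SignerType) // S3v4
}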
- sseGenericHeader = "X-Amz-Server-Side-Encryption" - - // sseKmsKeyID is the AWS SSE-KMS key id. - sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" - // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. - sseEncryptionContext = sseGenericHeader + "-Encryption-Context" - - // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. - sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" - // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. - sseCustomerKey = sseGenericHeader + "-Customer-Key" - // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. - sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" - - // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. - sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. - sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. - sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" -) - -// PBKDF creates a SSE-C key from the provided password and salt. -// PBKDF is a password-based key derivation function -// which can be used to derive a high-entropy cryptographic -// key from a low-entropy password and a salt. -type PBKDF func(password, salt []byte) ServerSide - -// DefaultPBKDF is the default PBKDF. It uses Argon2id with the -// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). -var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { - sse := ssec{} - copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) - return sse -} - -// Type is the server-side-encryption method. It represents one of -// the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption -type Type string - -const ( - // SSEC represents server-side-encryption with customer provided keys - SSEC Type = "SSE-C" - // KMS represents server-side-encryption with managed keys - KMS Type = "KMS" - // S3 represents server-side-encryption using S3 storage encryption - S3 Type = "S3" -) - -// ServerSide is a form of S3 server-side-encryption. -type ServerSide interface { - // Type returns the server-side-encryption method. - Type() Type - - // Marshal adds encryption headers to the provided HTTP headers. - // It marks an HTTP request as server-side-encryption request - // and inserts the required data into the headers. - Marshal(h http.Header) -} - -// NewSSE returns a server-side-encryption using S3 storage encryption. -// Using SSE-S3 the server will encrypt the object with server-managed keys. -func NewSSE() ServerSide { return s3{} } - -// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. -func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { - if context == nil { - return kms{key: keyID, hasContext: false}, nil - } - serializedContext, err := json.Marshal(context) - if err != nil { - return nil, err - } - return kms{key: keyID, context: serializedContext, hasContext: true}, nil -} - -// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. -// The key must be 32 bytes long. 
-func NewSSEC(key []byte) (ServerSide, error) { - if len(key) != 32 { - return nil, errors.New("encrypt: SSE-C key must be 256 bit long") - } - sse := ssec{} - copy(sse[:], key) - return sse, nil -} - -// SSE transforms a SSE-C copy encryption into a SSE-C encryption. -// It is the inverse of SSECopy(...). -// -// If the provided sse is no SSE-C copy encryption SSE returns -// sse unmodified. -func SSE(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssecCopy); ok { - return ssec(sse) - } - return sse -} - -// SSECopy transforms a SSE-C encryption into a SSE-C copy -// encryption. This is required for SSE-C key rotation or a SSE-C -// copy where the source and the destination should be encrypted. -// -// If the provided sse is no SSE-C encryption SSECopy returns -// sse unmodified. -func SSECopy(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssec); ok { - return ssecCopy(sse) - } - return sse -} - -type ssec [32]byte - -func (s ssec) Type() Type { return SSEC } - -func (s ssec) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCustomerAlgorithm, "AES256") - h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type ssecCopy [32]byte - -func (s ssecCopy) Type() Type { return SSEC } - -func (s ssecCopy) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCopyCustomerAlgorithm, "AES256") - h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type s3 struct{} - -func (s s3) Type() Type { return S3 } - -func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } - -type kms struct { - key string - context []byte - hasContext bool -} - -func (s kms) Type() Type { return KMS } - -func (s kms) Marshal(h http.Header) { - h.Set(sseGenericHeader, "aws:kms") - h.Set(sseKmsKeyID, s.key) - if s.hasContext { - h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) - } -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go deleted file mode 100644 index 810b47c4b..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
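A sketch of the SSE-C helpers removed above, using a random 32-byte key; DefaultPBKDF could derive one from a password and salt instead:

package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"net/http"

	"github.com/minio/minio-go/v6/pkg/encrypt"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		log.Fatalln(err)
	}
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		log.Fatalln(err)
	}

	// Marshal writes the SSE-C headers; SSECopy switches them to the
	// X-Amz-Copy-Source-* variants for the CopyObject source side.
	h := http.Header{}
	sse.Marshal(h)
	encrypt.SSECopy(sse).Marshal(h)
	fmt.Println(len(h)) // 6: three SSE-C plus three copy-source SSE-C headers
}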
- */ - -package s3signer - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "strconv" - "strings" - "time" -) - -// Reference for constants used below - -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming -const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF -) - -// Request headers to be ignored while calculating seed signature for -// a request. -var ignoredStreamingHeaders = map[string]bool{ - "Authorization": true, - "User-Agent": true, - "Content-Type": true, -} - -// getSignedChunkLength - calculates the length of chunk metadata -func getSignedChunkLength(chunkDataSize int64) int64 { - return int64(len(fmt.Sprintf("%x", chunkDataSize))) + - chunkSigConstLen + - signatureStrLen + - crlfLen + - chunkDataSize + - crlfLen -} - -// getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { - if dataLen <= 0 { - return 0 - } - - chunksCount := int64(dataLen / chunkSize) - remainingBytes := int64(dataLen % chunkSize) - streamLen := int64(0) - streamLen += chunksCount * getSignedChunkLength(chunkSize) - if remainingBytes > 0 { - streamLen += getSignedChunkLength(remainingBytes) - } - streamLen += getSignedChunkLength(0) - return streamLen -} - -// buildChunkStringToSign - returns the string to sign given chunk data -// and previous signature. -func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { - stringToSignParts := []string{ - streamingPayloadHdr, - t.Format(iso8601DateFormat), - getScope(region, t), - previousSig, - emptySHA256, - hex.EncodeToString(sum256(chunkData)), - } - - return strings.Join(stringToSignParts, "\n") -} - -// prepareStreamingRequest - prepares a request with appropriate -// headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { - // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) - // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) - req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) -} - -// buildChunkHeader - returns the chunk header. -// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n -func buildChunkHeader(chunkLen int64, signature string) []byte { - return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") -} - -// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
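To make the chunk-framing arithmetic above concrete, a standalone mirror of getSignedChunkLength (which is unexported) with two worked values:

package main

import "fmt"

// signedChunkLen mirrors getSignedChunkLength: hex length of the chunk
// size, ";chunk-signature=" (17), a 64-char signature, CRLF, the data,
// and a trailing CRLF.
func signedChunkLen(n int64) int64 {
	return int64(len(fmt.Sprintf("%x", n))) + 17 + 64 + 2 + n + 2
}

func main() {
	// One full 64 KiB chunk: len("10000") = 5, so
	// 5 + 17 + 64 + 2 + 65536 + 2 = 65626 bytes.
	fmt.Println(signedChunkLen(64 * 1024)) // 65626

	// A 128 KiB payload: two full chunks plus the terminating
	// zero-byte chunk (1 + 17 + 64 + 2 + 0 + 2 = 86), so
	// 2*65626 + 86 = 131338.
	fmt.Println(2*signedChunkLen(64*1024) + signedChunkLen(0)) // 131338
}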
-func buildChunkSignature(chunkData []byte, reqTime time.Time, region, - previousSignature, secretAccessKey string) string { - - chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkData) - signingKey := getSigningKey(secretAccessKey, region, reqTime) - return getSignature(signingKey, chunkStringToSign) -} - -// getSeedSignature - returns the seed signature for a given request. -func (s *StreamingReader) setSeedSignature(req *http.Request) { - // Get canonical request - canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) - - signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) - - // Calculate signature. - s.seedSignature = getSignature(signingKey, stringToSign) -} - -// StreamingReader implements chunked upload signature as a reader on -// top of req.Body's ReaderCloser chunk header;data;... repeat -type StreamingReader struct { - accessKeyID string - secretAccessKey string - sessionToken string - region string - prevSignature string - seedSignature string - contentLen int64 // Content-Length from req header - baseReadCloser io.ReadCloser // underlying io.Reader - bytesRead int64 // bytes read from underlying io.Reader - buf bytes.Buffer // holds signed chunk - chunkBuf []byte // holds raw data read from req Body - chunkBufLen int // no. of bytes read so far into chunkBuf - done bool // done reading the underlying reader to EOF - reqTime time.Time - chunkNum int - totalChunks int - lastChunkSize int -} - -// signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { - // Compute chunk signature for next header - signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, - s.region, s.prevSignature, s.secretAccessKey) - - // For next chunk signature computation - s.prevSignature = signature - - // Write chunk header into streaming buffer - chunkHdr := buildChunkHeader(int64(chunkLen), signature) - s.buf.Write(chunkHdr) - - // Write chunk data into streaming buffer - s.buf.Write(s.chunkBuf[:chunkLen]) - - // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) - - // Reset chunkBufLen for next chunk read. - s.chunkBufLen = 0 - s.chunkNum++ -} - -// setStreamingAuthHeader - builds and sets authorization header value -// for streaming signature. -func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { - credential := GetCredential(s.accessKeyID, s.region, s.reqTime) - authParts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), - "Signature=" + s.seedSignature, - } - - // Set authorization header. - auth := strings.Join(authParts, ",") - req.Header.Set("Authorization", auth) -} - -// StreamingSignV4 - provides chunked upload signatureV4 support by -// implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time) *http.Request { - - // Set headers needed for streaming signature. 
- prepareStreamingRequest(req, sessionToken, dataLen, reqTime) - - if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) - } - - stReader := &StreamingReader{ - baseReadCloser: req.Body, - accessKeyID: accessKeyID, - secretAccessKey: secretAccessKey, - sessionToken: sessionToken, - region: region, - reqTime: reqTime, - chunkBuf: make([]byte, payloadChunkSize), - contentLen: dataLen, - chunkNum: 1, - totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, - lastChunkSize: int(dataLen % payloadChunkSize), - } - - // Add the request headers required for chunk upload signing. - - // Compute the seed signature. - stReader.setSeedSignature(req) - - // Set the authorization header with the seed signature. - stReader.setStreamingAuthHeader(req) - - // Set seed signature as prevSignature for subsequent - // streaming signing process. - stReader.prevSignature = stReader.seedSignature - req.Body = stReader - - return req -} - -// Read - this method performs chunk upload signature providing an -// io.Reader interface. -func (s *StreamingReader) Read(buf []byte) (int, error) { - switch { - // After the last chunk is read from underlying reader, we - // never re-fill s.buf. - case s.done: - - // s.buf will be (re-)filled with the next chunk when it has fewer - // bytes than asked for. - case s.buf.Len() < len(buf): - s.chunkBufLen = 0 - for { - n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - // Usually we validate `err` first, but in this case - // we are validating n > 0 for the following reasons. - // - // 1. n > 0, err is one of io.EOF, nil (near end of stream) - // A Reader returning a non-zero number of bytes at the end - // of the input stream may return either err == EOF or err == nil - // - // 2. n == 0, err is io.EOF (actual end of stream) - // - // Callers should always process the n > 0 bytes returned - // before considering the error err. - if n1 > 0 { - s.chunkBufLen += n1 - s.bytesRead += int64(n1) - - if s.chunkBufLen == payloadChunkSize || - (s.chunkNum == s.totalChunks-1 && - s.chunkBufLen == s.lastChunkSize) { - // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) - break - } - } - if err != nil { - if err == io.EOF { - // No more data left in baseReader - last chunk. - // Done reading the last chunk from baseReader. - s.done = true - - // bytes read from baseReader differ from the - // content length provided. - if s.bytesRead != s.contentLen { - return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) - } - - // Sign the chunk and write it to s.buf. - s.signChunk(0) - break - } - return 0, err - } - - } - } - return s.buf.Read(buf) -} - -// Close - this method makes underlying io.ReadCloser's Close method available. -func (s *StreamingReader) Close() error { - return s.baseReadCloser.Close() -}
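The framing produced by the streaming signer above is easy to sanity-check outside the library. The following standalone sketch mirrors the deleted getSignedChunkLength and getStreamLength helpers (constants and formulas are copied from the file above; the package main wrapper and the example payload size are illustrative only):

package main

import "fmt"

const (
	chunkSigConstLen = 17 // ";chunk-signature="
	signatureStrLen  = 64 // hex-encoded SHA256 chunk signature
	crlfLen          = 2  // "\r\n"
)

// signedChunkLength mirrors getSignedChunkLength above:
// hex(size) + ";chunk-signature=" + signature + CRLF + data + CRLF.
func signedChunkLength(chunkDataSize int64) int64 {
	return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
		chunkSigConstLen + signatureStrLen + crlfLen +
		chunkDataSize + crlfLen
}

// streamLength mirrors getStreamLength: full chunks, an optional short
// final data chunk, and the terminating zero-length chunk.
func streamLength(dataLen, chunkSize int64) int64 {
	if dataLen <= 0 {
		return 0
	}
	n := dataLen / chunkSize
	streamLen := n * signedChunkLength(chunkSize)
	if rem := dataLen % chunkSize; rem > 0 {
		streamLen += signedChunkLength(rem)
	}
	return streamLen + signedChunkLength(0)
}

func main() {
	// 100 KiB payload with 64 KiB chunks: one full chunk, one
	// 36 KiB chunk, plus the final zero-length chunk.
	fmt.Println(streamLength(100*1024, 64*1024))
}

Each chunk adds 85 bytes of fixed metadata plus the hex length prefix, which is what lets prepareStreamingRequest set req.ContentLength before any body data has been read.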
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go deleted file mode 100644 index 40ba07130..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go +++ /dev/null @@ -1,316 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3signer - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// Encode input URL path to URL encoded path. -func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { - if virtualHost { - reqHost := getHostAddr(req) - dotPos := strings.Index(reqHost, ".") - if dotPos > -1 { - bucketName := reqHost[:dotPos] - path = "/" + bucketName - path += req.URL.Path - path = s3utils.EncodePath(path) - return - } - } - path = s3utils.EncodePath(req.URL.Path) - return -} - -// PreSignV2 - presign the request in the following style. -// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. -func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { - // Presign is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - d := time.Now().UTC() - // Find epoch expires when the request will expire. - epochExpires := d.Unix() + expires - - // Add expires header if not present. - if expiresStr := req.Header.Get("Expires"); expiresStr == "" { - req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) - } - - // Get presigned string to sign. - stringToSign := preStringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Calculate signature. - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - - query := req.URL.Query() - // Handle specially for Google Cloud Storage. - if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { - query.Set("GoogleAccessId", accessKeyID) - } else { - query.Set("AWSAccessKeyId", accessKeyID) - } - - // Fill in Expires for presigned query. - query.Set("Expires", strconv.FormatInt(epochExpires, 10)) - - // Encode query and save. - req.URL.RawQuery = s3utils.QueryEncode(query) - - // Save signature finally. - req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) - - // Return. - return &req -} - -// PostPresignSignatureV2 - presigned signature for PostPolicy -// request. -func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(policyBase64)) - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - return signature -} - -// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -// -// CanonicalizedResource = [ "/" + Bucket ] + -// <HTTP-Request-URI, from the protocol name up to the query string> + -// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -// -// CanonicalizedProtocolHeaders = <described below> - -// SignV2 sign the request before Do() (AWS Signature Version 2). -func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { - // Signature calculation is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - d := time.Now().UTC() - - // Add date if not present. - if date := req.Header.Get("Date"); date == "" { - req.Header.Set("Date", d.Format(http.TimeFormat)) - } - - // Calculate HMAC for secretAccessKey. - stringToSign := stringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Prepare auth header. - authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) - encoder := base64.NewEncoder(base64.StdEncoding, authHeader) - encoder.Write(hm.Sum(nil)) - encoder.Close() - - // Set Authorization header. - req.Header.Set("Authorization", authHeader.String()) - - return &req -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func preStringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writePreSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writePreSignV2Headers - write preSign v2 required headers. -func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Expires") + "\n") -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func stringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writeSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writeSignV2Headers - write signV2 required headers. -func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Date") + "\n") -}
-// writeCanonicalizedHeaders - write canonicalized headers. -func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { - var protoHeaders []string - vals := make(map[string][]string) - for k, vv := range req.Header { - // All the AMZ headers should be lowercase - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-amz") { - protoHeaders = append(protoHeaders, lk) - vals[lk] = vv - } - } - sort.Strings(protoHeaders) - for _, k := range protoHeaders { - buf.WriteString(k) - buf.WriteByte(':') - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - if strings.Contains(v, "\n") { - // TODO: "Unfold" long headers that - // span multiple lines (as allowed by - // RFC 2616, section 4.2) by replacing - // the folding white-space (including - // new-line) by a single space. - buf.WriteString(v) - } else { - buf.WriteString(v) - } - } - buf.WriteByte('\n') - } -} - -// AWS S3 Signature V2 calculation rule is given here: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign - -// Whitelist resource list that will be used in query string for signature-V2 calculation. -// The list should be alphabetically sorted -var resourceList = []string{ - "acl", - "delete", - "lifecycle", - "location", - "logging", - "notification", - "partNumber", - "policy", - "requestPayment", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", - "response-content-language", - "response-content-type", - "response-expires", - "torrent", - "uploadId", - "uploads", - "versionId", - "versioning", - "versions", - "website", -} - -// From the Amazon docs: -// -// CanonicalizedResource = [ "/" + Bucket ] + -// <HTTP-Request-URI, from the protocol name up to the query string> + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { - // Save request URL. - requestURL := req.URL - // Get encoded URL path. - buf.WriteString(encodeURL2Path(&req, virtualHost)) - if requestURL.RawQuery != "" { - var n int - vals, _ := url.ParseQuery(requestURL.RawQuery) - // Verify if any sub resource queries are present, if yes - // canonicalize them. - for _, resource := range resourceList { - if vv, ok := vals[resource]; ok && len(vv) > 0 { - n++ - // First element - switch n { - case 1: - buf.WriteByte('?') - // The rest - default: - buf.WriteByte('&') - } - buf.WriteString(resource) - // Request parameters - if len(vv[0]) > 0 { - buf.WriteByte('=') - buf.WriteString(vv[0]) - } - } - } - } -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go deleted file mode 100644 index ab96b58c5..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go +++ /dev/null @@ -1,315 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package s3signer - -import ( - "bytes" - "encoding/hex" - "net/http" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// Signature and API related constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" - yyyymmdd = "20060102" -) - -/// -/// Excerpts from @lsegal - -/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes -/// problems with generating pre-signed URLs (that are executed -/// by other agents) or when customers pass requests through -/// proxies, which may modify the user-agent. -/// -/// Content-Length: -/// -/// This is ignored from signing because generating a pre-signed -/// URL should not provide a content-length constraint, -/// specifically when vending a S3 pre-signed PUT URL. The -/// corollary to this is that when sending regular requests -/// (non-pre-signed), the signature contains a checksum of the -/// body, which implicitly validates the payload length (since -/// changing the number of bytes would change the checksum) -/// and therefore this header is not valuable in the signature. -/// -/// Content-Type: -/// -/// Signing this header causes quite a number of problems in -/// browser environments, where browsers like to modify and -/// normalize the content-type header in different ways. There is -/// more information on this in https://goo.gl/2E9gyy. Avoiding -/// this field simplifies logic and reduces the possibility of -/// future bugs. -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// -var v4IgnoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -// getSigningKey hmac seed to calculate final signature. -func getSigningKey(secret, loc string, t time.Time) []byte { - date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) - location := sumHMAC(date, []byte(loc)) - service := sumHMAC(location, []byte("s3")) - signingKey := sumHMAC(service, []byte("aws4_request")) - return signingKey -} - -// getSignature final signature in hexadecimal form. -func getSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - -// getScope generate a string of a specific date, an AWS region, and a -// service. -func getScope(location string, t time.Time) string { - scope := strings.Join([]string{ - t.Format(yyyymmdd), - location, - "s3", - "aws4_request", - }, "/") - return scope -} - -// GetCredential generate a credential string. -func GetCredential(accessKeyID, location string, t time.Time) string { - scope := getScope(location, t) - return accessKeyID + "/" + scope -} - -// getHashedPayload get the hexadecimal value of the SHA256 hash of -// the request payload. -func getHashedPayload(req http.Request) string { - hashedPayload := req.Header.Get("X-Amz-Content-Sha256") - if hashedPayload == "" { - // Presign does not have a payload, use S3 recommended value. - hashedPayload = unsignedPayload - } - return hashedPayload -} - -// getCanonicalHeaders generate a list of request headers for -// signature. 
-func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { - var headers []string - vals := make(map[string][]string) - for k, vv := range req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - vals[strings.ToLower(k)] = vv - } - headers = append(headers, "host") - sort.Strings(headers) - - var buf bytes.Buffer - // Save all the headers in canonical form <header>:<value> newline - // separated for each header. - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - switch { - case k == "host": - buf.WriteString(getHostAddr(&req)) - fallthrough - default: - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(signV4TrimAll(v)) - } - buf.WriteByte('\n') - } - } - return buf.String() -} - -// getSignedHeaders generates all signed request headers, -// i.e. a lexically sorted, semicolon-separated list of lowercase -// request header names. -func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { - var headers []string - for k := range req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // Ignored header found continue. - } - headers = append(headers, strings.ToLower(k)) - } - headers = append(headers, "host") - sort.Strings(headers) - return strings.Join(headers, ";") -} - -// getCanonicalRequest generates a canonical request of the following style. -// -// canonicalRequest = -// <HTTPMethod>\n -// <CanonicalURI>\n -// <CanonicalQueryString>\n -// <CanonicalHeaders>\n -// <SignedHeaders>\n -// <HashedPayload> -func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string { - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) - canonicalRequest := strings.Join([]string{ - req.Method, - s3utils.EncodePath(req.URL.Path), - req.URL.RawQuery, - getCanonicalHeaders(req, ignoredHeaders), - getSignedHeaders(req, ignoredHeaders), - getHashedPayload(req), - }, "\n") - return canonicalRequest -} - -// getStringToSignV4 - returns the string to sign based on selected query values. -func getStringToSignV4(t time.Time, location, canonicalRequest string) string { - stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(location, t) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) - return stringToSign -} - -// PreSignV4 presign the request, in accordance with -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. -func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { - // Presign is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - t := time.Now().UTC() - - // Get credential string. - credential := GetCredential(accessKeyID, location, t) - - // Get all signed headers. - signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) - - // Set URL query. - query := req.URL.Query() - query.Set("X-Amz-Algorithm", signV4Algorithm) - query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) - query.Set("X-Amz-SignedHeaders", signedHeaders) - query.Set("X-Amz-Credential", credential) - // Set session token if available. - if sessionToken != "" { - query.Set("X-Amz-Security-Token", sessionToken) - } - req.URL.RawQuery = query.Encode() - - // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest) - - // Get hmac signing key. - signingKey := getSigningKey(secretAccessKey, location, t) - - // Calculate signature. - signature := getSignature(signingKey, stringToSign) - - // Add signature header to RawQuery. - req.URL.RawQuery += "&X-Amz-Signature=" + signature - - return &req -}
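The signing key used by PreSignV4 above (and SignV4 below) is derived by chaining four HMAC-SHA256 operations, as getSigningKey does. A standalone sketch of that chain using only the standard library (crypto/sha256 stands in for the vendored sha256-simd; secret, region, and string-to-sign are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// AWS4-HMAC-SHA256 key derivation, as in the deleted getSigningKey:
	// kDate = HMAC("AWS4"+secret, yyyymmdd), then region, service, terminator.
	secret := "SECRETKEY" // placeholder
	t := time.Date(2020, 3, 1, 10, 20, 42, 0, time.UTC)
	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	// The final signature is hex(HMAC(signingKey, stringToSign)).
	stringToSign := "AWS4-HMAC-SHA256\n20200301T102042Z\n20200301/us-east-1/s3/aws4_request\n<hash>"
	fmt.Println(hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))))
}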
-// PostPresignSignatureV4 - presigned signature for PostPolicy -// requests. -func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { - // Get signing key. - signingkey := getSigningKey(secretAccessKey, location, t) - // Calculate signature. - signature := getSignature(signingkey, policyBase64) - return signature -} - -// SignV4 sign the request before Do(), in accordance with -// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - // Signature calculation is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - t := time.Now().UTC() - - // Set x-amz-date. - req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - - // Set session token if available. - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest) - - // Get hmac signing key. - signingKey := getSigningKey(secretAccessKey, location, t) - - // Get credential string. - credential := GetCredential(accessKeyID, location, t) - - // Get all signed headers. - signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) - - // Calculate signature. - signature := getSignature(signingKey, stringToSign) - - // If regular request, construct the final authorization header. - parts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + signedHeaders, - "Signature=" + signature, - } - - // Set authorization header. - auth := strings.Join(parts, ", ") - req.Header.Set("Authorization", auth) - - return &req -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go deleted file mode 100644 index 934e33a48..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3signer - -import ( - "crypto/hmac" - "net/http" - "strings" - - "github.com/minio/sha256-simd" -) - -// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header when -// the request payload should not be signed. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// sum256 calculates the sha256 sum of an input byte array. -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumHMAC calculates the hmac of the given key and data.
-func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -// getHostAddr returns host header if available, otherwise returns host from URL -func getHostAddr(req *http.Request) string { - if req.Host != "" { - return req.Host - } - return req.URL.Host -} - -// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() -// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -func signV4TrimAll(input string) string { - // Compress adjacent spaces (a space is determined by - // unicode.IsSpace() internally here) to one space and return - return strings.Join(strings.Fields(input), " ") -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go deleted file mode 100644 index 9af2997b7..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go +++ /dev/null @@ -1,331 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3utils - -import ( - "bytes" - "encoding/hex" - "errors" - "net" - "net/url" - "regexp" - "sort" - "strings" - "unicode/utf8" -) - -// Sentinel URL is the default url value which is invalid. -var sentinelURL = url.URL{} - -// IsValidDomain validates if input string is a valid domain name. -func IsValidDomain(host string) bool { - // See RFC 1035, RFC 3696. - host = strings.TrimSpace(host) - if len(host) == 0 || len(host) > 255 { - return false - } - // host cannot start or end with "-" - if host[len(host)-1:] == "-" || host[:1] == "-" { - return false - } - // host cannot start or end with "_" - if host[len(host)-1:] == "_" || host[:1] == "_" { - return false - } - // host cannot start with a "." - if host[:1] == "." { - return false - } - // All non alphanumeric characters are invalid. - if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") { - return false - } - // No need to regexp match, since the list is non-exhaustive. - // We let it be valid and fail later. - return true -} - -// Amazon S3 endpoint host patterns, used to extract the region from a URL. -var ( - amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`) - amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`) - amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`) - amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`) -) - -// GetRegionFromURL - returns a region from url host. -func GetRegionFromURL(endpointURL url.URL) string { - if endpointURL == sentinelURL { - return "" - } - if endpointURL.Host == "s3-external-1.amazonaws.com" { - return "" - } - parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. -func IsAmazonEndpoint(endpointURL url.URL) bool { - if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { - return true - } - return GetRegionFromURL(endpointURL) != "" -} - -// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
-func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || - IsAmazonFIPSGovCloudEndpoint(endpointURL)) -} - -// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" -} - -// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - switch endpointURL.Host { - case "s3-fips.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-west-1.amazonaws.com": - case "s3-fips.dualstack.us-west-2.amazonaws.com": - case "s3-fips.dualstack.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-east-1.amazonaws.com": - case "s3-fips.us-west-1.amazonaws.com": - case "s3-fips.us-west-2.amazonaws.com": - case "s3-fips.us-east-1.amazonaws.com": - default: - return false - } - return true -} - -// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { - return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) -} - -// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. -func IsGoogleEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "storage.googleapis.com" -} - -// Expects ascii encoded strings - from output of urlEncodePath -func percentEncodeSlash(s string) string { - return strings.Replace(s, "/", "%2F", -1) -} - -// QueryEncode - encodes query values in their URL encoded form. In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func QueryEncode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := percentEncodeSlash(EncodePath(k)) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(percentEncodeSlash(EncodePath(v))) - } - } - return buf.String() -} - -// if object matches reserved string, no need to encode them -var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - -// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. 
-func EncodePath(pathName string) string { - if reservedObjectNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var ( - validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) - validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) -) - -// Common checker for both stricter and basic validation. -func checkBucketNameCommon(bucketName string, strict bool) (err error) { - if strings.TrimSpace(bucketName) == "" { - return errors.New("Bucket name cannot be empty") - } - if len(bucketName) < 3 { - return errors.New("Bucket name cannot be smaller than 3 characters") - } - if len(bucketName) > 63 { - return errors.New("Bucket name cannot be greater than 63 characters") - } - if ipAddress.MatchString(bucketName) { - return errors.New("Bucket name cannot be an ip address") - } - if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { - return errors.New("Bucket name contains invalid characters") - } - if strict { - if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err - } - if !validBucketName.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err -} - -// CheckValidBucketName - checks if we have a valid input bucket name. -func CheckValidBucketName(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, false) -} - -// CheckValidBucketNameStrict - checks if we have a valid input bucket name. -// This is a stricter version. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, true) -} - -// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectNamePrefix(objectName string) error { - if len(objectName) > 1024 { - return errors.New("Object name cannot be greater than 1024 characters") - } - if !utf8.ValidString(objectName) { - return errors.New("Object name with non UTF-8 strings are not supported") - } - return nil -} - -// CheckValidObjectName - checks if we have a valid input object name. 
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return errors.New("Object name cannot be empty") - } - return CheckValidObjectNamePrefix(objectName) -} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go deleted file mode 100644 index e220271bb..000000000 --- a/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package set - -import ( - "encoding/json" - "fmt" - "sort" -) - -// StringSet - uses map as set of strings. -type StringSet map[string]struct{} - -// ToSlice - returns StringSet as string slice. -func (set StringSet) ToSlice() []string { - keys := make([]string, 0, len(set)) - for k := range set { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// IsEmpty - returns whether the set is empty or not. -func (set StringSet) IsEmpty() bool { - return len(set) == 0 -} - -// Add - adds string to the set. -func (set StringSet) Add(s string) { - set[s] = struct{}{} -} - -// Remove - removes string in the set. It does nothing if string does not exist in the set. -func (set StringSet) Remove(s string) { - delete(set, s) -} - -// Contains - checks if string is in the set. -func (set StringSet) Contains(s string) bool { - _, ok := set[s] - return ok -} - -// FuncMatch - returns new set containing each value who passes match function. -// A 'matchFn' should accept element in a set as first argument and -// 'matchString' as second argument. The function can do any logic to -// compare both the arguments and should return true to accept element in -// a set to include in output set else the element is ignored. -func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { - nset := NewStringSet() - for k := range set { - if matchFn(k, matchString) { - nset.Add(k) - } - } - return nset -} - -// ApplyFunc - returns new set containing each value processed by 'applyFn'. -// A 'applyFn' should accept element in a set as a argument and return -// a processed string. The function can do any logic to return a processed -// string. -func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(applyFn(k)) - } - return nset -} - -// Equals - checks whether given set is equal to current set or not. -func (set StringSet) Equals(sset StringSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(set) != len(sset) { - return false - } - - // As both sets are equal in length, check each elements are equal. 
- for k := range set { - if _, ok := sset[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns the intersection with given set as new set. -func (set StringSet) Intersection(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// Difference - returns the difference with given set as new set. -func (set StringSet) Difference(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; !ok { - nset.Add(k) - } - } - - return nset -} - -// Union - returns the union with given set as new set. -func (set StringSet) Union(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(k) - } - - for k := range sset { - nset.Add(k) - } - - return nset -} - -// MarshalJSON - converts to JSON data. -func (set StringSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.ToSlice()) -} - -// UnmarshalJSON - parses JSON data and creates new set with it. -// If 'data' contains JSON string array, the set contains each string. -// If 'data' contains JSON string, the set contains the string as one element. -// If 'data' contains Other JSON types, JSON parse error is returned. -func (set *StringSet) UnmarshalJSON(data []byte) error { - sl := []string{} - var err error - if err = json.Unmarshal(data, &sl); err == nil { - *set = make(StringSet) - for _, s := range sl { - set.Add(s) - } - } else { - var s string - if err = json.Unmarshal(data, &s); err == nil { - *set = make(StringSet) - set.Add(s) - } - } - - return err -} - -// String - returns printable string of the set. -func (set StringSet) String() string { - return fmt.Sprintf("%s", set.ToSlice()) -} - -// NewStringSet - creates new string set. -func NewStringSet() StringSet { - return make(StringSet) -} - -// CreateStringSet - creates new string set with given string values. -func CreateStringSet(sl ...string) StringSet { - set := make(StringSet) - for _, k := range sl { - set.Add(k) - } - return set -} - -// CopyStringSet - returns copy of given set. -func CopyStringSet(set StringSet) StringSet { - nset := NewStringSet() - for k, v := range set { - nset[k] = v - } - return nset -} diff --git a/vendor/github.com/minio/minio-go/v6/post-policy.go b/vendor/github.com/minio/minio-go/v6/post-policy.go deleted file mode 100644 index f9250b2a8..000000000 --- a/vendor/github.com/minio/minio-go/v6/post-policy.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/base64" - "fmt" - "strings" - "time" -) - -// expirationDateFormat date format for expiration key in json policy. 
-const expirationDateFormat = "2006-01-02T15:04:05.999Z" - -// policyCondition explanation: -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html -// -// Example: -// -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// -type policyCondition struct { - matchType string - condition string - value string -} - -// PostPolicy - Provides strict static type conversion and validation -// for Amazon S3's POST policy JSON string. -type PostPolicy struct { - // Expiration date and time of the POST policy. - expiration time.Time - // Collection of different policy conditions. - conditions []policyCondition - // ContentLengthRange minimum and maximum allowable size for the - // uploaded content. - contentLengthRange struct { - min int64 - max int64 - } - - // Post form data. - formData map[string]string -} - -// NewPostPolicy - Instantiate new post policy. -func NewPostPolicy() *PostPolicy { - p := &PostPolicy{} - p.conditions = make([]policyCondition, 0) - p.formData = make(map[string]string) - return p -} - -// SetExpires - Sets expiration time for the new policy. -func (p *PostPolicy) SetExpires(t time.Time) error { - if t.IsZero() { - return ErrInvalidArgument("No expiry time set.") - } - p.expiration = t - return nil -} - -// SetKey - Sets an object name for the policy based upload. -func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Object name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$key", - value: key, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = key - return nil -} - -// SetKeyStartsWith - Sets an object name that an policy based upload -// can start with. -func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return ErrInvalidArgument("Object prefix is empty.") - } - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$key", - value: keyStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = keyStartsWith - return nil -} - -// SetBucket - Sets bucket at which objects will be uploaded to. -func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { - return ErrInvalidArgument("Bucket name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$bucket", - value: bucketName, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["bucket"] = bucketName - return nil -} - -// SetContentType - Sets content-type of the object for this policy -// based upload. -func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { - return ErrInvalidArgument("No content type specified.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$Content-Type", - value: contentType, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentType - return nil -} - -// SetContentLengthRange - Set new min and max content length -// condition for all incoming uploads. 
-func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { - return ErrInvalidArgument("Minimum limit is larger than maximum limit.") - } - if min < 0 { - return ErrInvalidArgument("Minimum limit cannot be negative.") - } - if max < 0 { - return ErrInvalidArgument("Maximum limit cannot be negative.") - } - p.contentLengthRange.min = min - p.contentLengthRange.max = max - return nil -} - -// SetSuccessStatusAction - Sets the success status code of the object for this policy -// based upload. -func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" || status == "" { - return ErrInvalidArgument("Status is empty") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$success_action_status", - value: status, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["success_action_status"] = status - return nil -} - -// SetUserMetadata - Set user metadata as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserMetadata(key string, value string) error { - if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Key is empty") - } - if strings.TrimSpace(value) == "" || value == "" { - return ErrInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-meta-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// SetUserData - Set user data as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserData(key string, value string) error { - if key == "" { - return ErrInvalidArgument("Key is empty") - } - if value == "" { - return ErrInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// addNewPolicy - internal helper to validate adding new policies. -func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { - return ErrInvalidArgument("Policy fields are empty.") - } - p.conditions = append(p.conditions, policyCond) - return nil -} - -// Stringer interface for printing policy in json formatted string. -func (p PostPolicy) String() string { - return string(p.marshalJSON()) -}
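Taken together, the setters above accumulate policyCondition entries that marshalJSON (below) serializes into the POST policy document. A short usage sketch against the deleted API as it existed in this vendored copy (bucket, key, expiry, and limits are illustrative; error returns are ignored for brevity):

package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	// Build a POST policy with the setters shown above; values are illustrative.
	p := minio.NewPostPolicy()
	p.SetBucket("mybucket")
	p.SetKey("myobject")
	p.SetExpires(time.Now().UTC().Add(24 * time.Hour))
	p.SetContentLengthRange(1, 10*1024*1024)

	// String() serializes the accumulated conditions via marshalJSON, e.g.:
	// {"expiration":"...","conditions":[["eq","$bucket","mybucket"],
	//  ["eq","$key","myobject"],["content-length-range", 1, 10485760]]}
	fmt.Println(p)
}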
-// marshalJSON - Provides Marshalled JSON in bytes. -func (p PostPolicy) marshalJSON() []byte { - expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` - var conditionsStr string - conditions := []string{} - for _, po := range p.conditions { - conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) - } - if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { - conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", - p.contentLengthRange.min, p.contentLengthRange.max)) - } - if len(conditions) > 0 { - conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" - } - retStr := "{" - retStr = retStr + expirationStr + "," - retStr = retStr + conditionsStr - retStr = retStr + "}" - return []byte(retStr) -} - -// base64 - Produces base64 of PostPolicy's Marshalled json. -func (p PostPolicy) base64() string { - return base64.StdEncoding.EncodeToString(p.marshalJSON()) -} diff --git a/vendor/github.com/minio/minio-go/v6/retry-continous.go b/vendor/github.com/minio/minio-go/v6/retry-continous.go deleted file mode 100644 index 3d25883b0..000000000 --- a/vendor/github.com/minio/minio-go/v6/retry-continous.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "time" - -// newRetryTimerContinous creates a timer with exponentially increasing delays forever. -func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { - attemptCh := make(chan int) - - // normalize jitter to the range [0, 1.0] - if jitter < NoJitter { - jitter = NoJitter - } - if jitter > MaxJitter { - jitter = MaxJitter - } - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // 1<<uint(attempt) below could overflow, so limit the value of attempt - maxAttempt := 30 - if attempt > maxAttempt { - attempt = maxAttempt - } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - defer close(attemptCh) - var nextBackoff int - for { - select { - // Attempts start. - case attemptCh <- nextBackoff: - nextBackoff++ - case <-doneCh: - // Stop the routine. - return - } - time.Sleep(exponentialBackoffWait(nextBackoff)) - } - }() - return attemptCh -}
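The same capped-exponential-with-jitter formula reappears in retry.go below; stripped of the channel plumbing it is only a few lines. A runnable sketch (unit, cap, and jitter are illustrative values, and the local rand.Rand stands in for the client's c.random):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffWait mirrors the deleted exponentialBackoffWait closures:
// sleep = min(cap, unit * 2^attempt), minus a random jitter fraction.
func backoffWait(attempt int, unit, cap time.Duration, jitter float64, rng *rand.Rand) time.Duration {
	if attempt > 30 { // keep 1<<uint(attempt) from overflowing
		attempt = 30
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	if jitter > 0 {
		sleep -= time.Duration(rng.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(backoffWait(attempt, time.Second, 30*time.Second, 0.5, rng))
	}
}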
diff --git a/vendor/github.com/minio/minio-go/v6/retry.go b/vendor/github.com/minio/minio-go/v6/retry.go deleted file mode 100644 index 2c608bab5..000000000 --- a/vendor/github.com/minio/minio-go/v6/retry.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// MaxRetry is the maximum number of retries before stopping. -var MaxRetry = 10 - -// MaxJitter will randomize over the full exponential backoff time -const MaxJitter = 1.0 - -// NoJitter disables the use of jitter for randomizing the exponential backoff time -const NoJitter = 0.0 - -// DefaultRetryUnit - default unit multiplicative per retry. -// defaults to 1 second. -const DefaultRetryUnit = time.Second - -// DefaultRetryCap - Each retry attempt never waits no longer than -// this maximum time duration. -const DefaultRetryCap = time.Second * 30 - -// newRetryTimer creates a timer with exponentially increasing -// delays until the maximum retry attempts are reached. -func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { - attemptCh := make(chan int) - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // normalize jitter to the range [0, 1.0] - if jitter < NoJitter { - jitter = NoJitter - } - if jitter > MaxJitter { - jitter = MaxJitter - } - - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - defer close(attemptCh) - for i := 0; i < maxRetry; i++ { - select { - // Attempts start from 1. - case attemptCh <- i + 1: - case <-doneCh: - // Stop the routine. - return - } - time.Sleep(exponentialBackoffWait(i)) - } - }() - return attemptCh -} - -// isHTTPReqErrorRetryable - returns whether an http request error is retryable, -// such as i/o timeout, connection broken, etc. -func isHTTPReqErrorRetryable(err error) bool { - if err == nil { - return false - } - switch e := err.(type) { - case *url.Error: - switch e.Err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - } - if strings.Contains(err.Error(), "Connection closed by foreign host") { - return true - } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError, retry. - return true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError, retry. - return true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout, retry. - return true - } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { - // If error is transport connection broken, retry.
- return true - } else if strings.Contains(err.Error(), "net/http: timeout awaiting response headers") { - // Retry errors due to server not sending the response before timeout - return true - } - } - return false -} - -// List of AWS S3 error codes which are retryable. -var retryableS3Codes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "InternalError": {}, - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "SlowDown": {}, - // Add more AWS S3 codes here. -} - -// isS3CodeRetryable - is s3 error code retryable. -func isS3CodeRetryable(s3Code string) (ok bool) { - _, ok = retryableS3Codes[s3Code] - return ok -} - -// List of HTTP status codes which are retryable. -var retryableHTTPStatusCodes = map[int]struct{}{ - 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, - http.StatusGatewayTimeout: {}, - // Add more HTTP status codes here. -} - -// isHTTPStatusRetryable - is HTTP error code retryable. -func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { - _, ok = retryableHTTPStatusCodes[httpStatusCode] - return ok -} diff --git a/vendor/github.com/minio/minio-go/v6/s3-endpoints.go b/vendor/github.com/minio/minio-go/v6/s3-endpoints.go deleted file mode 100644 index 989f58c7e..000000000 --- a/vendor/github.com/minio/minio-go/v6/s3-endpoints.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// awsS3EndpointMap Amazon S3 endpoint map. 
-var awsS3EndpointMap = map[string]string{ - "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", - "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", - "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", - "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", - "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", - "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", - "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", - "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", - "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", - "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", - "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", - "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", - "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", - "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", - "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", - "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", - "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", - "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn", -} - -// getS3Endpoint get Amazon S3 endpoint based on the bucket location. -func getS3Endpoint(bucketLocation string) (s3Endpoint string) { - s3Endpoint, ok := awsS3EndpointMap[bucketLocation] - if !ok { - // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. - s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" - } - return s3Endpoint -} diff --git a/vendor/github.com/minio/minio-go/v6/s3-error.go b/vendor/github.com/minio/minio-go/v6/s3-error.go deleted file mode 100644 index f365157ee..000000000 --- a/vendor/github.com/minio/minio-go/v6/s3-error.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -// Non exhaustive list of AWS S3 standard error responses - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - // Add new API errors here. 
-} diff --git a/vendor/github.com/minio/minio-go/v6/staticcheck.conf b/vendor/github.com/minio/minio-go/v6/staticcheck.conf deleted file mode 100644 index 71cc6f536..000000000 --- a/vendor/github.com/minio/minio-go/v6/staticcheck.conf +++ /dev/null @@ -1 +0,0 @@ -checks = ["all", "-ST1005", "-ST1017", "-SA9004", "-ST1000", "-S1021"] \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v6/transport.go b/vendor/github.com/minio/minio-go/v6/transport.go deleted file mode 100644 index 34efa8980..000000000 --- a/vendor/github.com/minio/minio-go/v6/transport.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build go1.7 go1.8 - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/tls" - "crypto/x509" - "net" - "net/http" - "time" - - "golang.org/x/net/http2" -) - -// DefaultTransport - this default transport is similar to -// http.DefaultTransport but with additional param DisableCompression -// is set to true to avoid decompressing content with 'gzip' encoding. -var DefaultTransport = func(secure bool) (http.RoundTripper, error) { - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 1024, - MaxIdleConnsPerHost: 1024, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, - } - - if secure { - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - // In some systems (like Windows) system cert pool is - // not supported or no certificates are present on the - // system - so we create a new cert pool. - rootCAs = x509.NewCertPool() - } - - // Keep TLS config. - tlsConfig := &tls.Config{ - RootCAs: rootCAs, - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - } - tr.TLSClientConfig = tlsConfig - - // Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2. - // See https://github.com/golang/go/issues/14275 - if err := http2.ConfigureTransport(tr); err != nil { - return nil, err - } - } - return tr, nil -} diff --git a/vendor/github.com/minio/minio-go/v6/utils.go b/vendor/github.com/minio/minio-go/v6/utils.go deleted file mode 100644 index d24cfb5c7..000000000 --- a/vendor/github.com/minio/minio-go/v6/utils.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "regexp" - "strings" - "time" - - "github.com/minio/sha256-simd" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}) error { - d := xml.NewDecoder(body) - return d.Decode(v) -} - -// sum256 calculate sha256sum for an input byte array, returns hex encoded. -func sum256Hex(data []byte) string { - hash := sha256.New() - hash.Write(data) - return hex.EncodeToString(hash.Sum(nil)) -} - -// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. -func sumMD5Base64(data []byte) string { - hash := md5.New() - hash.Write(data) - return base64.StdEncoding.EncodeToString(hash.Sum(nil)) -} - -// getEndpointURL - construct a new endpoint. -func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { - if strings.Contains(endpoint, ":") { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - return nil, err - } - if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } else { - if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } - // If secure is false, use 'http' scheme. - scheme := "https" - if !secure { - scheme = "http" - } - - // Construct a secured endpoint URL. - endpointURLStr := scheme + "://" + endpoint - endpointURL, err := url.Parse(endpointURLStr) - if err != nil { - return nil, err - } - - // Validate incoming endpoint URL. - if err := isValidEndpointURL(*endpointURL); err != nil { - return nil, err - } - return endpointURL, nil -} - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. - // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. - // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } -} - -var ( - // Hex encoded string of nil sha256sum bytes. 
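The closeResponse helper above encodes a common net/http idiom: drain whatever is left of the body before closing it, so the Transport can return the TCP connection to its keep-alive pool instead of tearing it down. A standalone sketch of the same idiom, assuming only the standard library (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.invalid/") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	// Drain, then close, exactly as closeResponse does: a body closed with
	// bytes still unread forces the transport to drop the connection rather
	// than reuse it for the next keep-alive request.
	defer func() {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}()

	var buf [512]byte
	n, _ := resp.Body.Read(buf[:]) // deliberately read only part of the body
	fmt.Println("read", n, "bytes; the deferred drain handles the rest")
}
```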
- emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - - // Sentinel URL is the default url value which is invalid. - sentinelURL = url.URL{} -) - -// Verify if input endpoint URL is valid. -func isValidEndpointURL(endpointURL url.URL) error { - if endpointURL == sentinelURL { - return ErrInvalidArgument("Endpoint url cannot be empty.") - } - if endpointURL.Path != "/" && endpointURL.Path != "" { - return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") - } - if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { - if !s3utils.IsAmazonEndpoint(endpointURL) { - return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") - } - } - if strings.Contains(endpointURL.Host, ".googleapis.com") { - if !s3utils.IsGoogleEndpoint(endpointURL) { - return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") - } - } - return nil -} - -// Verify if input expires value is valid. -func isValidExpiry(expires time.Duration) error { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 { - return ErrInvalidArgument("Expires cannot be lesser than 1 second.") - } - if expireSeconds > 604800 { - return ErrInvalidArgument("Expires cannot be greater than 7 days.") - } - return nil -} - -// make a copy of http.Header -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// Filter relevant response headers from -// the HEAD, GET http response. The function takes -// a list of headers which are filtered out and -// returned as a new http header. -func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) { - filteredHeader = cloneHeader(header) - for _, key := range filterKeys { - filteredHeader.Del(key) - } - return filteredHeader -} - -// regCred matches credential string in HTTP header -var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") - -// regCred matches signature string in HTTP header -var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") - -// Redact out signature value from authorization string. -func redactSignature(origAuth string) string { - if !strings.HasPrefix(origAuth, signV4Algorithm) { - // Set a temporary redacted auth - return "AWS **REDACTED**:**REDACTED**" - } - - /// Signature V4 authorization header. - - // Strip out accessKeyID from: - // Credential=////aws4_request - newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") - - // Strip out 256-bit signature from: Signature=<256-bit signature> - return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") -} - -// Get default location returns the location based on the input -// URL `u`, if region override is provided then all location -// defaults to regionOverride. -// -// If no other cases match then the location is set to `us-east-1` -// as a last resort. -func getDefaultLocation(u url.URL, regionOverride string) (location string) { - if regionOverride != "" { - return regionOverride - } - region := s3utils.GetRegionFromURL(u) - if region == "" { - region = "us-east-1" - } - return region -} - -var supportedHeaders = []string{ - "content-type", - "cache-control", - "content-encoding", - "content-disposition", - "content-language", - "x-amz-website-redirect-location", - "x-amz-object-lock-mode", - "x-amz-object-lock-retain-until-date", - "expires", - // Add more supported headers here. 
-} - -// isStorageClassHeader returns true if the header is a supported storage class header -func isStorageClassHeader(headerKey string) bool { - return strings.EqualFold(amzStorageClass, headerKey) -} - -// isStandardHeader returns true if header is a supported header and not a custom header -func isStandardHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, header := range supportedHeaders { - if strings.ToLower(header) == key { - return true - } - } - return false -} - -// sseHeaders is list of server side encryption headers -var sseHeaders = []string{ - "x-amz-server-side-encryption", - "x-amz-server-side-encryption-aws-kms-key-id", - "x-amz-server-side-encryption-context", - "x-amz-server-side-encryption-customer-algorithm", - "x-amz-server-side-encryption-customer-key", - "x-amz-server-side-encryption-customer-key-MD5", -} - -// isSSEHeader returns true if header is a server side encryption header. -func isSSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range sseHeaders { - if strings.ToLower(h) == key { - return true - } - } - return false -} - -// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. -func isAmzHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - - return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) -} diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore deleted file mode 100644 index c56069fe2..000000000 --- a/vendor/github.com/minio/sha256-simd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test \ No newline at end of file diff --git a/vendor/github.com/minio/sha256-simd/.travis.yml b/vendor/github.com/minio/sha256-simd/.travis.yml deleted file mode 100644 index 4f85db539..000000000 --- a/vendor/github.com/minio/sha256-simd/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: required -dist: trusty -language: go - -os: -- linux - -go: -- tip -- 1.12.x - -env: -- ARCH=x86_64 -- ARCH=i686 - -matrix: - fast_finish: true - allow_failures: - - go: tip - -script: -- diff -au <(gofmt -d .) <(printf "") -- go test -race -v ./... -- go vet -asmdecl . -- ./test-architectures.sh diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/minio/sha256-simd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md deleted file mode 100644 index 5282d83ad..000000000 --- a/vendor/github.com/minio/sha256-simd/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# sha256-simd - -Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions and AVX2 for Intel and ARM64 for ARM. 
On AVX512 it provides an up to 8x improvement (over 3 GB/s per core) in comparison to AVX2. SHA Extensions give a performance boost of close to 4x over AVX2. - -## Introduction - -This package is designed as a replacement for `crypto/sha256`. For Intel CPUs it has two flavors for AVX512 and AVX2 (AVX/SSE are also supported). For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. - -This package uses Golang assembly. The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. - -## New: Support for Intel SHA Extensions - -Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). - -``` -$ benchcmp avx2.txt sha-ext.txt -benchmark AVX2 MB/s SHA Ext MB/s speedup -BenchmarkHash5M 514.40 1975.17 3.84x -``` - -Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, the other changes increased everything roughly 50%. - -## Support for AVX512 - -We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): - -``` -$ benchcmp avx2.txt avx512.txt -benchmark AVX2 MB/s AVX512 MB/s speedup -BenchmarkHash5M 448.62 3498.20 7.80x -``` - -The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). - -Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. - -Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. - -Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. 
Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: - -```go -import "github.com/minio/sha256-simd" - -func main() { - server := sha256.NewAvx512Server() - h512 := sha256.NewAvx512(server) - h512.Write(fileBlock) - digest := h512.Sum([]byte{}) -} -``` - -Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance: -* Have many go routines doing SHA256 calculations in parallel. -* Try to Write() messages in multiples of 64 bytes. -* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). - -More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. - -## Drop-In Replacement - -The following code snippet shows how you can use `github.com/minio/sha256-simd`. This will automatically select the fastest method for the architecture on which it will be executed. - -```go -import "github.com/minio/sha256-simd" - -func main() { - ... - shaWriter := sha256.New() - io.Copy(shaWriter, file) - ... -} -``` - -## Performance - -Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. - -| Processor | SIMD | Speed (MB/s) | -| --------------------------------- | ------- | ------------:| -| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | -| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | -| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | -| 3.0 GHz Intel Xeon Platinum 8124M | AVX2 | 449 | -| 3.1 GHz Intel Core i7 | AVX | 362 | -| 3.1 GHz Intel Core i7 | SSE | 299 | - -## asm2plan9s - -In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. - -## Why and benefits - -One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. - -Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. - -## ARM SHA Extensions - -The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). 
- - ``` - sha256h q2, q3, v9.4s - sha256h2 q3, q4, v9.4s - sha256su0 v5.4s, v6.4s - rev32 v8.16b, v8.16b - add v9.4s, v7.4s, v18.4s - mov v4.16b, v2.16b - sha256h q2, q3, v10.4s - sha256h2 q3, q4, v10.4s - sha256su0 v6.4s, v7.4s - sha256su1 v5.4s, v7.4s, v8.4s - ``` - -### Detailed benchmarks - -Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). - -``` -minio@minio-arm:$ benchcmp golang.txt arm64.txt -benchmark golang arm64 speedup -BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x -BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x -BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x -BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x -``` - -## License - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -## Contributing - -Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/sha256-simd/appveyor.yml b/vendor/github.com/minio/sha256-simd/appveyor.yml deleted file mode 100644 index a66bfa9f2..000000000 --- a/vendor/github.com/minio/sha256-simd/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. -platform: x64 - -clone_folder: c:\gopath\src\github.com\minio\sha256-simd - -# environment variables -environment: - GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go test . - - go test -race . - -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/vendor/github.com/minio/sha256-simd/cpuid.go b/vendor/github.com/minio/sha256-simd/cpuid.go deleted file mode 100644 index 878ad4638..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid.go +++ /dev/null @@ -1,119 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -// True when SIMD instructions are available. 
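Since the README above positions the package as a drop-in replacement for crypto/sha256, a quick cross-check harness is a natural smoke test. A minimal sketch, assuming only the two packages the README names (both expose Sum256; the input is arbitrary):

```go
package main

import (
	"bytes"
	stdsha "crypto/sha256"
	"fmt"

	simd "github.com/minio/sha256-simd"
)

func main() {
	msg := bytes.Repeat([]byte("abc"), 1000)
	a := stdsha.Sum256(msg) // standard library
	b := simd.Sum256(msg)   // accelerated implementation
	fmt.Println("digests match:", a == b)
}
```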
-var avx512 bool -var avx2 bool -var avx bool -var sse bool -var sse2 bool -var sse3 bool -var ssse3 bool -var sse41 bool -var sse42 bool -var popcnt bool -var sha bool -var armSha = haveArmSha() - -func init() { - var _xsave bool - var _osxsave bool - var _avx bool - var _avx2 bool - var _avx512f bool - var _avx512dq bool - // var _avx512pf bool - // var _avx512er bool - // var _avx512cd bool - var _avx512bw bool - var _avx512vl bool - var _sseState bool - var _avxState bool - var _opmaskState bool - var _zmmHI256State bool - var _hi16ZmmState bool - - mfi, _, _, _ := cpuid(0) - - if mfi >= 1 { - _, _, c, d := cpuid(1) - - sse = (d & (1 << 25)) != 0 - sse2 = (d & (1 << 26)) != 0 - sse3 = (c & (1 << 0)) != 0 - ssse3 = (c & (1 << 9)) != 0 - sse41 = (c & (1 << 19)) != 0 - sse42 = (c & (1 << 20)) != 0 - popcnt = (c & (1 << 23)) != 0 - _xsave = (c & (1 << 26)) != 0 - _osxsave = (c & (1 << 27)) != 0 - _avx = (c & (1 << 28)) != 0 - } - - if mfi >= 7 { - _, b, _, _ := cpuid(7) - - _avx2 = (b & (1 << 5)) != 0 - _avx512f = (b & (1 << 16)) != 0 - _avx512dq = (b & (1 << 17)) != 0 - // _avx512pf = (b & (1 << 26)) != 0 - // _avx512er = (b & (1 << 27)) != 0 - // _avx512cd = (b & (1 << 28)) != 0 - _avx512bw = (b & (1 << 30)) != 0 - _avx512vl = (b & (1 << 31)) != 0 - sha = (b & (1 << 29)) != 0 - } - - // Stop here if XSAVE unsupported or not enabled - if !_xsave || !_osxsave { - return - } - - if _xsave && _osxsave { - a, _ := xgetbv(0) - - _sseState = (a & (1 << 1)) != 0 - _avxState = (a & (1 << 2)) != 0 - _opmaskState = (a & (1 << 5)) != 0 - _zmmHI256State = (a & (1 << 6)) != 0 - _hi16ZmmState = (a & (1 << 7)) != 0 - } else { - _sseState = true - } - - // Very unlikely that OS would enable XSAVE and then disable SSE - if !_sseState { - sse = false - sse2 = false - sse3 = false - ssse3 = false - sse41 = false - sse42 = false - } - - if _avxState { - avx = _avx - avx2 = _avx2 - } - - if _opmaskState && _zmmHI256State && _hi16ZmmState { - avx512 = (_avx512f && - _avx512dq && - _avx512bw && - _avx512vl) - } -} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.go b/vendor/github.com/minio/sha256-simd/cpuid_386.go deleted file mode 100644 index c9890be47..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_386.go +++ /dev/null @@ -1,24 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
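The init function above derives the avx*/sse*/sha flags from raw CPUID leaves 1 and 7 plus XGETBV. As a hedged aside: newer code can read the same bits through golang.org/x/sys/cpu, assuming its exported flag names; the sketch below mirrors the four AVX-512 subsets (F, DQ, BW, VL) that the init gates on, and x/sys/cpu also folds in the XSAVE operating-system checks:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Same gate as the init above: require the F, DQ, BW, and VL subsets
	// before enabling a 16-lane AVX-512 kernel.
	avx512 := cpu.X86.HasAVX512F && cpu.X86.HasAVX512DQ &&
		cpu.X86.HasAVX512BW && cpu.X86.HasAVX512VL
	fmt.Println("avx2:", cpu.X86.HasAVX2, "avx512:", avx512)
}
```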
-// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func xgetbv(index uint32) (eax, edx uint32) - -func haveArmSha() bool { - return false -} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.s b/vendor/github.com/minio/sha256-simd/cpuid_386.s deleted file mode 100644 index 1511cd6f6..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_386.s +++ /dev/null @@ -1,53 +0,0 @@ -// The MIT License (MIT) -// -// Copyright (c) 2015 Klaus Post -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// +build 386,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.go b/vendor/github.com/minio/sha256-simd/cpuid_amd64.go deleted file mode 100644 index c9890be47..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func xgetbv(index uint32) (eax, edx uint32) - -func haveArmSha() bool { - return false -} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s deleted file mode 100644 index b0f414748..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s +++ /dev/null @@ -1,53 +0,0 @@ -// The MIT License (MIT) -// -// Copyright (c) 2015 Klaus Post -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// +build amd64,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_arm.go b/vendor/github.com/minio/sha256-simd/cpuid_arm.go deleted file mode 100644 index 351dff4b6..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_arm.go +++ /dev/null @@ -1,32 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 -} - -func haveArmSha() bool { - return false -} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go deleted file mode 100644 index e739996d9..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build arm64,linux - -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -import ( - "bytes" - "io/ioutil" -) - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 -} - -// File to check for cpu capabilities. -const procCPUInfo = "/proc/cpuinfo" - -// Feature to check for. -const sha256Feature = "sha2" - -func haveArmSha() bool { - cpuInfo, err := ioutil.ReadFile(procCPUInfo) - if err != nil { - return false - } - return bytes.Contains(cpuInfo, []byte(sha256Feature)) -} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go deleted file mode 100644 index 3e4415828..000000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_other.go +++ /dev/null @@ -1,34 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
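haveArmSha above detects the sha2 capability with a whole-file bytes.Contains over /proc/cpuinfo. A slightly stricter variant, sketched below, compares tokens of the Features line exactly so a substring match elsewhere in the file cannot produce a false positive; this is an illustrative alternative, not what the package ships:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// hasCPUFeature reports whether a token appears in the "Features" line of
// /proc/cpuinfo, matching whole tokens rather than substrings.
func hasCPUFeature(feature string) bool {
	f, err := os.Open("/proc/cpuinfo")
	if err != nil {
		return false
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		line := s.Text()
		if !strings.HasPrefix(line, "Features") {
			continue
		}
		if i := strings.IndexByte(line, ':'); i >= 0 {
			for _, tok := range strings.Fields(line[i+1:]) {
				if tok == feature {
					return true
				}
			}
		}
	}
	return false
}

func main() { fmt.Println("sha2:", hasCPUFeature("sha2")) }
```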
-// - -// +build !386,!amd64,!arm,!arm64 arm64,!linux - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 -} - -func haveArmSha() bool { - return false -} diff --git a/vendor/github.com/minio/sha256-simd/go.mod b/vendor/github.com/minio/sha256-simd/go.mod deleted file mode 100644 index 4451e9eb2..000000000 --- a/vendor/github.com/minio/sha256-simd/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/minio/sha256-simd - -go 1.12 diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go deleted file mode 100644 index 4e1f6d2f7..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256.go +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "crypto/sha256" - "encoding/binary" - "hash" - "runtime" -) - -// Size - The size of a SHA256 checksum in bytes. -const Size = 32 - -// BlockSize - The blocksize of SHA256 in bytes. -const BlockSize = 64 - -const ( - chunk = BlockSize - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 -} - -// Reset digest back to default -func (d *digest) Reset() { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - d.nx = 0 - d.len = 0 -} - -type blockfuncType int - -const ( - blockfuncGeneric blockfuncType = iota - blockfuncAvx512 blockfuncType = iota - blockfuncAvx2 blockfuncType = iota - blockfuncAvx blockfuncType = iota - blockfuncSsse blockfuncType = iota - blockfuncSha blockfuncType = iota - blockfuncArm blockfuncType = iota -) - -var blockfunc blockfuncType - -func init() { - is386bit := runtime.GOARCH == "386" - isARM := runtime.GOARCH == "arm" - switch { - case is386bit || isARM: - blockfunc = blockfuncGeneric - case sha && ssse3 && sse41: - blockfunc = blockfuncSha - case avx2: - blockfunc = blockfuncAvx2 - case avx: - blockfunc = blockfuncAvx - case ssse3: - blockfunc = blockfuncSsse - case armSha: - blockfunc = blockfuncArm - default: - blockfunc = blockfuncGeneric - } -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - if blockfunc != blockfuncGeneric { - d := new(digest) - d.Reset() - return d - } - // Fallback to the standard golang implementation - // if no features were found. 
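The init/switch above records the chosen kernel as an enum value that block re-dispatches on for every call. An equivalent and common alternative is to select a function value once at startup; the sketch below illustrates that design choice on toy stand-ins (hasAVX2, blockAVX2, and blockFunc are placeholders, not the package's symbols):

```go
package main

import "fmt"

type blockFunc func(p []byte)

func blockGeneric(p []byte) { fmt.Println("generic block,", len(p), "bytes") }
func blockAVX2(p []byte)    { fmt.Println("avx2 block,", len(p), "bytes") }

// Placeholder for a CPUID-derived flag like the avx2 variable above.
var hasAVX2 = false

// Selected once at startup; callers pay no per-call branch.
var block blockFunc = blockGeneric

func init() {
	if hasAVX2 {
		block = blockAVX2
	}
}

func main() {
	block(make([]byte, 64))
}
```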
- return sha256.New() -} - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) (result [Size]byte) { - var d digest - d.Reset() - d.Write(data) - result = d.checkSum() - return -} - -// Return size of checksum -func (d *digest) Size() int { return Size } - -// Return blocksize of checksum -func (d *digest) BlockSize() int { return BlockSize } - -// Write to digest -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Return sha256 sum in bytes -func (d *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d0 := *d - hash := d0.checkSum() - return append(in, hash[:]...) -} - -// Intermediate checksum function -func (d *digest) checkSum() (digest [Size]byte) { - n := d.nx - - var k [64]byte - copy(k[:], d.x[:n]) - - k[n] = 0x80 - - if n >= 56 { - block(d, k[:]) - - // clear block buffer - go compiles this to optimal 1x xorps + 4x movups - // unfortunately expressing this more succinctly results in much worse code - k[0] = 0 - k[1] = 0 - k[2] = 0 - k[3] = 0 - k[4] = 0 - k[5] = 0 - k[6] = 0 - k[7] = 0 - k[8] = 0 - k[9] = 0 - k[10] = 0 - k[11] = 0 - k[12] = 0 - k[13] = 0 - k[14] = 0 - k[15] = 0 - k[16] = 0 - k[17] = 0 - k[18] = 0 - k[19] = 0 - k[20] = 0 - k[21] = 0 - k[22] = 0 - k[23] = 0 - k[24] = 0 - k[25] = 0 - k[26] = 0 - k[27] = 0 - k[28] = 0 - k[29] = 0 - k[30] = 0 - k[31] = 0 - k[32] = 0 - k[33] = 0 - k[34] = 0 - k[35] = 0 - k[36] = 0 - k[37] = 0 - k[38] = 0 - k[39] = 0 - k[40] = 0 - k[41] = 0 - k[42] = 0 - k[43] = 0 - k[44] = 0 - k[45] = 0 - k[46] = 0 - k[47] = 0 - k[48] = 0 - k[49] = 0 - k[50] = 0 - k[51] = 0 - k[52] = 0 - k[53] = 0 - k[54] = 0 - k[55] = 0 - k[56] = 0 - k[57] = 0 - k[58] = 0 - k[59] = 0 - k[60] = 0 - k[61] = 0 - k[62] = 0 - k[63] = 0 - } - binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) - block(d, k[:]) - - { - const i = 0 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 1 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 2 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 3 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 4 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 5 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 6 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 7 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - - return -} - -func block(dig *digest, p []byte) { - if blockfunc == blockfuncSha { - blockShaGo(dig, p) - } else if blockfunc == blockfuncAvx2 { - blockAvx2Go(dig, p) - } else if blockfunc == blockfuncAvx { - blockAvxGo(dig, p) - } else if blockfunc == blockfuncSsse { - blockSsseGo(dig, p) - } else if blockfunc == blockfuncArm { - blockArmGo(dig, p) - } else if blockfunc == blockfuncGeneric { - blockGeneric(dig, p) - } -} - -func blockGeneric(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. 
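checkSum above implements the SHA-256 padding rule: append one 0x80 byte, then zeros until the running length is 56 mod 64, then the 64-bit big-endian bit count. A small sketch that computes the resulting pad length, handy for sanity-checking the two-block case the n >= 56 branch handles:

```go
package main

import "fmt"

// padLen returns how many bytes SHA-256 appends to an n-byte message:
// one 0x80 byte, k zero bytes so that (n+1+k)%64 == 56, then the 8-byte
// big-endian bit count. Matches the k buffer handling in checkSum above.
func padLen(n uint64) uint64 {
	k := (55 - n%64 + 64) % 64 // uint64 wrap-around preserves the value mod 64
	return 1 + k + 8
}

func main() {
	for _, n := range []uint64{0, 55, 56, 64} {
		fmt.Printf("msg=%3d pad=%2d padded=%3d\n", n, padLen(n), n+padLen(n))
	}
	// msg=56 pads out to 128 bytes: that is the n >= 56 two-block branch.
}
```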
- for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go deleted file mode 100644 index 52fcaee6d..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -//go:noescape -func blockAvx2(h []uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s deleted file mode 100644 index 80b0b739b..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s +++ /dev/null @@ -1,1449 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for AVX2 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -DATA K256<>+0x000(SB)/8, $0x71374491428a2f98 -DATA K256<>+0x008(SB)/8, $0xe9b5dba5b5c0fbcf -DATA K256<>+0x010(SB)/8, $0x71374491428a2f98 -DATA K256<>+0x018(SB)/8, $0xe9b5dba5b5c0fbcf -DATA K256<>+0x020(SB)/8, $0x59f111f13956c25b -DATA K256<>+0x028(SB)/8, $0xab1c5ed5923f82a4 -DATA K256<>+0x030(SB)/8, $0x59f111f13956c25b -DATA K256<>+0x038(SB)/8, $0xab1c5ed5923f82a4 -DATA K256<>+0x040(SB)/8, $0x12835b01d807aa98 -DATA K256<>+0x048(SB)/8, $0x550c7dc3243185be -DATA K256<>+0x050(SB)/8, $0x12835b01d807aa98 -DATA K256<>+0x058(SB)/8, $0x550c7dc3243185be -DATA K256<>+0x060(SB)/8, $0x80deb1fe72be5d74 -DATA K256<>+0x068(SB)/8, $0xc19bf1749bdc06a7 -DATA K256<>+0x070(SB)/8, $0x80deb1fe72be5d74 -DATA K256<>+0x078(SB)/8, $0xc19bf1749bdc06a7 -DATA K256<>+0x080(SB)/8, $0xefbe4786e49b69c1 -DATA K256<>+0x088(SB)/8, $0x240ca1cc0fc19dc6 -DATA K256<>+0x090(SB)/8, $0xefbe4786e49b69c1 -DATA K256<>+0x098(SB)/8, $0x240ca1cc0fc19dc6 -DATA K256<>+0x0a0(SB)/8, $0x4a7484aa2de92c6f -DATA K256<>+0x0a8(SB)/8, $0x76f988da5cb0a9dc -DATA K256<>+0x0b0(SB)/8, $0x4a7484aa2de92c6f -DATA K256<>+0x0b8(SB)/8, $0x76f988da5cb0a9dc -DATA K256<>+0x0c0(SB)/8, $0xa831c66d983e5152 -DATA K256<>+0x0c8(SB)/8, $0xbf597fc7b00327c8 -DATA K256<>+0x0d0(SB)/8, $0xa831c66d983e5152 -DATA K256<>+0x0d8(SB)/8, $0xbf597fc7b00327c8 -DATA K256<>+0x0e0(SB)/8, $0xd5a79147c6e00bf3 -DATA K256<>+0x0e8(SB)/8, $0x1429296706ca6351 -DATA K256<>+0x0f0(SB)/8, $0xd5a79147c6e00bf3 -DATA K256<>+0x0f8(SB)/8, $0x1429296706ca6351 -DATA K256<>+0x100(SB)/8, $0x2e1b213827b70a85 -DATA K256<>+0x108(SB)/8, $0x53380d134d2c6dfc -DATA K256<>+0x110(SB)/8, $0x2e1b213827b70a85 -DATA K256<>+0x118(SB)/8, $0x53380d134d2c6dfc -DATA K256<>+0x120(SB)/8, $0x766a0abb650a7354 -DATA K256<>+0x128(SB)/8, $0x92722c8581c2c92e -DATA K256<>+0x130(SB)/8, $0x766a0abb650a7354 -DATA K256<>+0x138(SB)/8, $0x92722c8581c2c92e -DATA K256<>+0x140(SB)/8, $0xa81a664ba2bfe8a1 -DATA K256<>+0x148(SB)/8, $0xc76c51a3c24b8b70 -DATA K256<>+0x150(SB)/8, $0xa81a664ba2bfe8a1 -DATA K256<>+0x158(SB)/8, $0xc76c51a3c24b8b70 -DATA K256<>+0x160(SB)/8, $0xd6990624d192e819 -DATA K256<>+0x168(SB)/8, $0x106aa070f40e3585 -DATA K256<>+0x170(SB)/8, $0xd6990624d192e819 -DATA K256<>+0x178(SB)/8, $0x106aa070f40e3585 -DATA K256<>+0x180(SB)/8, $0x1e376c0819a4c116 -DATA K256<>+0x188(SB)/8, $0x34b0bcb52748774c -DATA K256<>+0x190(SB)/8, $0x1e376c0819a4c116 -DATA K256<>+0x198(SB)/8, $0x34b0bcb52748774c -DATA K256<>+0x1a0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA K256<>+0x1a8(SB)/8, $0x682e6ff35b9cca4f -DATA K256<>+0x1b0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA K256<>+0x1b8(SB)/8, $0x682e6ff35b9cca4f -DATA K256<>+0x1c0(SB)/8, $0x78a5636f748f82ee -DATA K256<>+0x1c8(SB)/8, $0x8cc7020884c87814 -DATA K256<>+0x1d0(SB)/8, 
$0x78a5636f748f82ee -DATA K256<>+0x1d8(SB)/8, $0x8cc7020884c87814 -DATA K256<>+0x1e0(SB)/8, $0xa4506ceb90befffa -DATA K256<>+0x1e8(SB)/8, $0xc67178f2bef9a3f7 -DATA K256<>+0x1f0(SB)/8, $0xa4506ceb90befffa -DATA K256<>+0x1f8(SB)/8, $0xc67178f2bef9a3f7 - -DATA K256<>+0x200(SB)/8, $0x0405060700010203 -DATA K256<>+0x208(SB)/8, $0x0c0d0e0f08090a0b -DATA K256<>+0x210(SB)/8, $0x0405060700010203 -DATA K256<>+0x218(SB)/8, $0x0c0d0e0f08090a0b -DATA K256<>+0x220(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x228(SB)/8, $0xffffffffffffffff -DATA K256<>+0x230(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x238(SB)/8, $0xffffffffffffffff -DATA K256<>+0x240(SB)/8, $0xffffffffffffffff -DATA K256<>+0x248(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x250(SB)/8, $0xffffffffffffffff -DATA K256<>+0x258(SB)/8, $0x0b0a090803020100 - -GLOBL K256<>(SB), 8, $608 - -// We need 0x220 stack space aligned on a 512 boundary, so for the -// worstcase-aligned SP we need twice this amount, being 1088 (=0x440) -// -// SP aligned end-aligned stacksize -// 100013d0 10001400 10001620 592 -// 100013d8 10001400 10001620 584 -// 100013e0 10001600 10001820 1088 -// 100013e8 10001600 10001820 1080 - -// func blockAvx2(h []uint32, message []uint8) -TEXT ·blockAvx2(SB),$1088-48 - - MOVQ h+0(FP), DI // DI: &h - MOVQ message_base+24(FP), SI // SI: &message - MOVQ message_len+32(FP), DX // len(message) - ADDQ SI, DX // end pointer of input - MOVQ SP, R11 // copy stack pointer - ADDQ $0x220, SP // sp += 0x220 - ANDQ $0xfffffffffffffe00, SP // align stack frame - ADDQ $0x1c0, SP - MOVQ DI, 0x40(SP) // save ctx - MOVQ SI, 0x48(SP) // save input - MOVQ DX, 0x50(SP) // save end pointer - MOVQ R11, 0x58(SP) // save copy of stack pointer - - WORD $0xf8c5; BYTE $0x77 // vzeroupper - ADDQ $0x40, SI // input++ - MOVL (DI), AX - MOVQ SI, R12 // borrow $T1 - MOVL 4(DI), BX - CMPQ SI, DX // $_end - MOVL 8(DI), CX - LONG $0xe4440f4c // cmove r12,rsp /* next block or random data */ - MOVL 12(DI), DX - MOVL 16(DI), R8 - MOVL 20(DI), R9 - MOVL 24(DI), R10 - MOVL 28(DI), R11 - - LEAQ K256<>(SB), BP - LONG $0x856f7dc5; LONG $0x00000220 // VMOVDQA YMM8, 0x220[rbp] /* vmovdqa ymm8,YMMWORD PTR [rip+0x220] */ - LONG $0x8d6f7dc5; LONG $0x00000240 // VMOVDQA YMM9, 0x240[rbp] /* vmovdqa ymm9,YMMWORD PTR [rip+0x240] */ - LONG $0x956f7dc5; LONG $0x00000200 // VMOVDQA YMM10, 0x200[rbp] /* vmovdqa ymm7,YMMWORD PTR [rip+0x200] */ - -loop0: - LONG $0x6f7dc1c4; BYTE $0xfa // VMOVDQA YMM7, YMM10 - - // Load first 16 dwords from two blocks - MOVOU -64(SI), X0 // vmovdqu xmm0,XMMWORD PTR [rsi-0x40] - MOVOU -48(SI), X1 // vmovdqu xmm1,XMMWORD PTR [rsi-0x30] - MOVOU -32(SI), X2 // vmovdqu xmm2,XMMWORD PTR [rsi-0x20] - MOVOU -16(SI), X3 // vmovdqu xmm3,XMMWORD PTR [rsi-0x10] - - // Byte swap data and transpose data into high/low - LONG $0x387dc3c4; WORD $0x2404; BYTE $0x01 // vinserti128 ymm0,ymm0,[r12],0x1 - LONG $0x3875c3c4; LONG $0x0110244c // vinserti128 ymm1,ymm1,0x10[r12],0x1 - LONG $0x007de2c4; BYTE $0xc7 // vpshufb ymm0,ymm0,ymm7 - LONG $0x386dc3c4; LONG $0x01202454 // vinserti128 ymm2,ymm2,0x20[r12],0x1 - LONG $0x0075e2c4; BYTE $0xcf // vpshufb ymm1,ymm1,ymm7 - LONG $0x3865c3c4; LONG $0x0130245c // vinserti128 ymm3,ymm3,0x30[r12],0x1 - - LEAQ K256<>(SB), BP - LONG $0x006de2c4; BYTE $0xd7 // vpshufb ymm2,ymm2,ymm7 - LONG $0x65fefdc5; BYTE $0x00 // vpaddd ymm4,ymm0,[rbp] - LONG $0x0065e2c4; BYTE $0xdf // vpshufb ymm3,ymm3,ymm7 - LONG $0x6dfef5c5; BYTE $0x20 // vpaddd ymm5,ymm1,0x20[rbp] - LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,0x40[rbp] - LONG $0x7dfee5c5; BYTE $0x60 // 
vpaddd ymm7,ymm3,0x60[rbp] - - LONG $0x247ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm4 - XORQ R14, R14 - LONG $0x6c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm5 - - ADDQ $-0x40, SP - MOVQ BX, DI - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - XORQ CX, DI // magic - LONG $0x7c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm7 - MOVQ R9, R12 - ADDQ $0x80, BP - -loop1: - // Schedule 48 input dwords, by doing 3 rounds of 12 each - // Note: SIMD instructions are interleaved with the SHA calculations - ADDQ $-0x40, SP - LONG $0x0f75e3c4; WORD $0x04e0 // vpalignr ymm4,ymm1,ymm0,0x4 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) - LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0x0f65e3c4; WORD $0x04fa // vpalignr ymm7,ymm3,ymm2,0x4 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0xc7fefdc5 // vpaddd ymm0,ymm0,ymm7 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - LONG $0xfb70fdc5; BYTE $0xfa // vpshufd ymm7,ymm3,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) - LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 
// xor r15d,eax - LONG $0xc4fefdc5 // vpaddd ymm0,ymm0,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) - LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf870fdc5; BYTE $0x50 // vpshufd ymm7,ymm0,0x50 - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) - LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0x75fefdc5; BYTE $0x00 // vpaddd ymm6,ymm0,[rbp+0x0] - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - LONG $0x0f6de3c4; WORD $0x04e1 // vpalignr ymm4,ymm2,ymm1,0x4 - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) - LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // 
add edx,[rsp+0xa0] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0x0f7de3c4; WORD $0x04fb // vpalignr ymm7,ymm0,ymm3,0x4 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0xcffef5c5 // vpaddd ymm1,ymm1,ymm7 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - LONG $0xf870fdc5; BYTE $0xfa // vpshufd ymm7,ymm0,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) - LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - LONG $0xccfef5c5 // vpaddd ymm1,ymm1,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) - LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG 
$0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf970fdc5; BYTE $0x50 // vpshufd ymm7,ymm1,0x50 - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) - LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0x75fef5c5; BYTE $0x20 // vpaddd ymm6,ymm1,[rbp+0x20] - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 - - LONG $0x24648d48; BYTE $0xc0 // lea rsp,[rsp-0x40] - LONG $0x0f65e3c4; WORD $0x04e2 // vpalignr ymm4,ymm3,ymm2,0x4 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) - LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0x0f75e3c4; WORD $0x04f8 // vpalignr ymm7,ymm1,ymm0,0x4 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0xd7feedc5 // vpaddd ymm2,ymm2,ymm7 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE 
$0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - LONG $0xf970fdc5; BYTE $0xfa // vpshufd ymm7,ymm1,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) - LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - LONG $0xd4feedc5 // vpaddd ymm2,ymm2,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) - LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xfa70fdc5; BYTE $0x50 // vpshufd ymm7,ymm2,0x50 - LONG $0xf07b43c4; WORD $0x0df2 // rorx 
r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) - LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,[rbp+0x40] - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - LONG $0x0f7de3c4; WORD $0x04e3 // vpalignr ymm4,ymm0,ymm3,0x4 - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) - LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0x0f6de3c4; WORD $0x04f9 // vpalignr ymm7,ymm2,ymm1,0x4 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0xdffee5c5 // vpaddd ymm3,ymm3,ymm7 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - LONG $0xfa70fdc5; BYTE $0xfa // vpshufd ymm7,ymm2,0xfa - 
WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) - LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - LONG $0xdcfee5c5 // vpaddd ymm3,ymm3,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) - LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xfb70fdc5; BYTE $0x50 // vpshufd ymm7,ymm3,0x50 - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) - LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] - WORD 
$0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0x75fee5c5; BYTE $0x60 // vpaddd ymm6,ymm3,[rbp+0x60] - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 - ADDQ $0x80, BP - - CMPB 0x3(BP), $0x0 - JNE loop1 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x40) - LONG $0x245c0344; BYTE $0x40 // add r11d,[rsp+0x40] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x44) - LONG $0x24540344; BYTE $0x44 // add r10d,[rsp+0x44] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff 
// and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x48) - LONG $0x244c0344; BYTE $0x48 // add r9d,[rsp+0x48] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x4c) - LONG $0x24440344; BYTE $0x4c // add r8d,[rsp+0x4c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x60) - LONG $0x60245403 // add edx,[rsp+0x60] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - 
WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x64) - LONG $0x64244c03 // add ecx,[rsp+0x64] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x68) - LONG $0x68245c03 // add ebx,[rsp+0x68] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x6c) - LONG $0x6c244403 // add eax,[rsp+0x6c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff 
// and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x00) - LONG $0x241c0344 // add r11d,[rsp] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x04) - LONG $0x24540344; BYTE $0x04 // add r10d,[rsp+0x4] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x08) - LONG $0x244c0344; BYTE $0x08 // add r9d,[rsp+0x8] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; 
BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x0c) - LONG $0x24440344; BYTE $0x0c // add r8d,[rsp+0xc] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x20) - LONG $0x20245403 // add edx,[rsp+0x20] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x24) - LONG $0x24244c03 // add ecx,[rsp+0x24] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD 
$0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x28) - LONG $0x28245c03 // add ebx,[rsp+0x28] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x2c) - LONG $0x2c244403 // add eax,[rsp+0x2c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - MOVQ 0x200(SP), DI // $_ctx - ADDQ R14, AX - - LEAQ 0x1c0(SP), BP - - ADDL (DI), AX - ADDL 4(DI), BX - ADDL 8(DI), CX - ADDL 12(DI), DX - ADDL 16(DI), R8 - ADDL 20(DI), R9 - ADDL 24(DI), R10 - ADDL 28(DI), R11 - - MOVL AX, (DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - MOVL R8, 16(DI) - MOVL R9, 20(DI) - MOVL R10, 24(DI) - MOVL R11, 28(DI) - - CMPQ SI, 0x50(BP) // $_end - JE done - - XORQ R14, R14 - MOVQ BX, DI - XORQ CX, DI // magic - MOVQ R9, R12 - -loop2: - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, BP, 0x10) - LONG $0x105d0344 // add r11d,[rbp+0x10] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; 
WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, BP, 0x14) - LONG $0x14550344 // add r10d,[rbp+0x14] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, BP, 0x18) - LONG $0x184d0344 // add r9d,[rbp+0x18] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, BP, 0x1c) - LONG $0x1c450344 // add r8d,[rbp+0x1c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // 
rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, BP, 0x30) - WORD $0x5503; BYTE $0x30 // add edx,[rbp+0x30] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, BP, 0x34) - WORD $0x4d03; BYTE $0x34 // add ecx,[rbp+0x34] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, BP, 0x38) - WORD $0x5d03; BYTE $0x38 // add ebx,[rbp+0x38] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG 
$0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, BP, 0x3c) - WORD $0x4503; BYTE $0x3c // add eax,[rbp+0x3c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - ADDQ $-0x40, BP - CMPQ BP, SP - JAE loop2 - - MOVQ 0x200(SP), DI // $_ctx - ADDQ R14, AX - - ADDQ $0x1c0, SP - - ADDL (DI), AX - ADDL 4(DI), BX - ADDL 8(DI), CX - ADDL 12(DI), DX - ADDL 16(DI), R8 - ADDL 20(DI), R9 - - ADDQ $0x80, SI // input += 2 - ADDL 24(DI), R10 - MOVQ SI, R12 - ADDL 28(DI), R11 - CMPQ SI, 0x50(SP) // input == _end - - MOVL AX, (DI) - LONG $0xe4440f4c // cmove r12,rsp /* next block or stale data */ - MOVL AX, (DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - MOVL R8, 16(DI) - MOVL R9, 20(DI) - MOVL R10, 24(DI) - MOVL R11, 28(DI) - - JBE loop0 - LEAQ (SP), BP - -done: - MOVQ BP, SP - MOVQ 0x58(SP), SP // restore saved stack pointer - WORD $0xf8c5; BYTE $0x77 // vzeroupper - - RET - diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm deleted file mode 100644 index c959b1aa2..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm +++ /dev/null @@ -1,686 +0,0 @@ - -// 16x Parallel implementation of SHA256 for AVX512 - -// -// Minio Cloud Storage, (C) 2017 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// -// This code is based on the Intel Multi-Buffer Crypto for IPSec library -// and more specifically the following implementation: -// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm -// -// For Golang it has been converted into Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble the AVX512 instructions -// - -// Copyright (c) 2017, Intel Corporation -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of Intel Corporation nor the names of its contributors -// may be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
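For reference while reading the defines and round macros that follow: they encode the textbook SHA-256 functions (the CH/MAJ/SIGMA/sigma identities given in the comments further down, just before PROCESS_LOOP), applied to the same 32-bit word of 16 independent input blocks at once. The standalone Go sketch below models one PROCESS_LOOP step and one MSG_SCHED_ROUND_16_63 step per lane; vec, processLoop and msgSched are illustrative names rather than identifiers from this package, and the round constant is passed as a plain scalar where the assembly loads Kt from a table with each constant replicated across the vector.

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // ror is a right-rotate on 32 bits.
    func ror(x uint32, n int) uint32 { return bits.RotateLeft32(x, -n) }

    // The boolean and sigma functions exactly as listed in the comments below.
    func ch(e, f, g uint32) uint32  { return (e & f) ^ (^e & g) }
    func maj(a, b, c uint32) uint32 { return (a & b) ^ (a & c) ^ (b & c) }
    func Sigma0(a uint32) uint32    { return ror(a, 2) ^ ror(a, 13) ^ ror(a, 22) }
    func Sigma1(e uint32) uint32    { return ror(e, 6) ^ ror(e, 11) ^ ror(e, 25) }
    func sigma0(w uint32) uint32    { return ror(w, 7) ^ ror(w, 18) ^ (w >> 3) }
    func sigma1(w uint32) uint32    { return ror(w, 17) ^ ror(w, 19) ^ (w >> 10) }

    // vec models one zmm register: the same word taken from 16 independent blocks.
    type vec [16]uint32

    // processLoop is the scalar equivalent of one PROCESS_LOOP invocation:
    // T1 = H + SIGMA1(E) + CH(E,F,G) + Kt + Wt, T2 = SIGMA0(A) + MAJ(A,B,C),
    // then D += T1 and H = T1 + T2 (the caller rotates the register names).
    func processLoop(wt vec, kt uint32, a, b, c, e, f, g vec, d, h *vec) {
    	for i := 0; i < 16; i++ { // one AVX512 instruction covers all 16 lanes
    		t1 := h[i] + Sigma1(e[i]) + ch(e[i], f[i], g[i]) + kt + wt[i]
    		t2 := Sigma0(a[i]) + maj(a[i], b[i], c[i])
    		d[i] += t1
    		h[i] = t1 + t2
    	}
    }

    // msgSched is the scalar equivalent of MSG_SCHED_ROUND_16_63:
    // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + sigma0(Wt-15), computed in place.
    func msgSched(wt *vec, wtp1, wtp9, wtp14 vec) {
    	for i := range wt {
    		wt[i] += sigma1(wtp14[i]) + wtp9[i] + sigma0(wtp1[i])
    	}
    }

    func main() {
    	var a, b, c, d, e, f, g, h vec
    	processLoop(vec{}, 0x428a2f98, a, b, c, e, f, g, &d, &h)
    	fmt.Println(d[0], h[0]) // round 0 on an all-zero state, lane 0
    }

Running it performs round 0 on an all-zero state; in the assembly the same dataflow happens once per PROCESS_LOOP invocation with the register names rotated.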
- -#define SHA256_DIGEST_ROW_SIZE 64 - -// arg1 -#define STATE rdi -#define STATE_P9 DI -// arg2 -#define INP_SIZE rsi -#define INP_SIZE_P9 SI - -#define IDX rcx -#define TBL rdx -#define TBL_P9 DX - -#define INPUT rax -#define INPUT_P9 AX - -#define inp0 r9 -#define SCRATCH_P9 R12 -#define SCRATCH r12 -#define maskp r13 -#define MASKP_P9 R13 -#define mask r14 -#define MASK_P9 R14 - -#define A zmm0 -#define B zmm1 -#define C zmm2 -#define D zmm3 -#define E zmm4 -#define F zmm5 -#define G zmm6 -#define H zmm7 -#define T1 zmm8 -#define TMP0 zmm9 -#define TMP1 zmm10 -#define TMP2 zmm11 -#define TMP3 zmm12 -#define TMP4 zmm13 -#define TMP5 zmm14 -#define TMP6 zmm15 - -#define W0 zmm16 -#define W1 zmm17 -#define W2 zmm18 -#define W3 zmm19 -#define W4 zmm20 -#define W5 zmm21 -#define W6 zmm22 -#define W7 zmm23 -#define W8 zmm24 -#define W9 zmm25 -#define W10 zmm26 -#define W11 zmm27 -#define W12 zmm28 -#define W13 zmm29 -#define W14 zmm30 -#define W15 zmm31 - - -#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ - \ - \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} - \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} - \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} - \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} - \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} - \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} - \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} - \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} - \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} - \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} - \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} - \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} - \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} - \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} - \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} - \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} - \ - \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - \ - \ // process top half - vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} - vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} - vshufps _t1, _r2, _r3, 
0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} - vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} - \ - vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} - vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} - vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3} - vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} - \ - \ // use r2 in place of t0 - vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} - vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} - vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} - vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} - \ - vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} - vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} - vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} - vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} - \ - \ // use r6 in place of t0 - vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} - vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} - vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} - vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} - \ - vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 i13 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} - vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 i14 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} - vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 i15 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} - vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 i12 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} - \ - \ // use r10 in place of t0 - vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 m1 m0} - vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 m3 m2} - vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 o13 o12 p9 p8 o9 o8 p5 p4 o5 o4 p1 p0 o1 o0} - vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 o15 o14 p11 p10 o11 o10 p7 p6 o7 o6 p3 p2 o3 o2} - \ - vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 o13 n13 m13 p9 o9 n9 m9 p5 o5 n5 m5 p1 o1 n1 m1} - vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 o14 n14 m14 p10 o10 n10 m10 p6 o6 n6 m6 p2 o2 n2 m2} - vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 o15 n15 m15 p11 o11 n11 m11 p7 o7 n7 m7 p3 o3 n3 m3} - vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 o12 n12 m12 p8 o8 n8 m8 p4 o4 n4 m4 p0 o0 n0 m0} - \ - \ // At this point, the registers that contain interesting data are: - \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 - \ // Can use t1 and r14 as scratch registers - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ - \ - vmovdqu32 _r14, [rbx] \ - vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} - vmovdqu32 _t1, [r8] \ - vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vmovdqu32 _r2,
[rbx] \ - vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} - vmovdqu32 _t0, [r8] \ - vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vmovdqu32 _r3, [rbx] \ - vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r7, [r8] \ - vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vmovdqu32 _r1, [rbx] \ - vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} - vmovdqu32 _r5, [r8] \ - vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vmovdqu32 _r0, [rbx] \ - vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} - vmovdqu32 _r4, [r8] \ - vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} - \ - vmovdqu32 _r6, [rbx] \ - vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} - vmovdqu32 _r10, [r8] \ - vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} - \ - vmovdqu32 _r11, [rbx] \ - vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} - vmovdqu32 _r15, [r8] \ - vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} - \ - vmovdqu32 _r9, [rbx] \ - vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} - vmovdqu32 _r13, [r8] \ - vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} - \ - \ // At this point r8 and r12 can be used as scratch registers - vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ - vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ - vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ - vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ - vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - - -// CH(A, B, C) = (A&B) ^ 
(~A&C) -// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) -// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 -// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 -// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 -// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 - -// Main processing loop per round -#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ - \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt - \ // T2 = SIGMA0(A) + MAJ(A, B, C) - \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 - \ - \ // H becomes T2, then add T1 for A - \ // D becomes D + T1 for E - \ - vpaddd T1, _H, TMP3 \ // T1 = H + Kt - vmovdqu32 TMP0, _E \ - vprord TMP1, _E, 6 \ // ROR_6(E) - vprord TMP2, _E, 11 \ // ROR_11(E) - vprord TMP3, _E, 25 \ // ROR_25(E) - vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) - vpaddd T1, T1, _WT \ // T1 = T1 + Wt - vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) - vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) - vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) - vpaddd _D, _D, T1 \ // D = D + T1 - \ - vprord _H, _A, 2 \ // ROR_2(A) - vprord TMP2, _A, 13 \ // ROR_13(A) - vprord TMP3, _A, 22 \ // ROR_22(A) - vmovdqu32 TMP0, _A \ - vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) - vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) - vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) - vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 - \ - vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt - - -#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ - vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) - vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) - vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) - vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 - \ - vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) - vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) - vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + - \ // Wt-7 + sigma0(Wt-15) - - -// Note this is reading in a block of data for one lane -// When all 16 are read, the data must be transposed to build msg schedule -#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ - TESTQ $(1<<OFFSET), MASK_P9 \ - JE LABEL \ - MOVQ OFFSET*24(INPUT_P9), R9 \ - vmovdqu32 _WT, [inp0+IDX] \ -LABEL: \ - -lloop: - LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), TBL_P9 - vmovdqu32 TMP2, [TBL] - - // Get first K from table - MOVQ table+16(FP), TBL_P9 - vmovdqu32 TMP3, [TBL] - - // Save digests for later addition - vmovdqu32 [SCRATCH + 64*0], A - vmovdqu32 [SCRATCH + 64*1], B - vmovdqu32 [SCRATCH + 64*2], C - vmovdqu32 [SCRATCH + 64*3], D - vmovdqu32 [SCRATCH + 64*4], E - vmovdqu32 [SCRATCH + 64*5], F - vmovdqu32 [SCRATCH + 64*6], G - vmovdqu32 [SCRATCH + 64*7], H - - add IDX, 64 - - // Transpose input data - TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1) - - vpshufb W0, W0, TMP2 - vpshufb W1, W1, TMP2 - vpshufb W2, W2, TMP2 - vpshufb W3, W3, TMP2 - vpshufb W4, W4, TMP2 - vpshufb W5, W5, TMP2 - vpshufb W6, W6, TMP2 - vpshufb W7, W7, TMP2 - vpshufb W8, W8, TMP2 - vpshufb W9, W9, TMP2 - vpshufb W10, W10, TMP2 - vpshufb W11, W11, TMP2 - vpshufb W12, W12, TMP2 - vpshufb W13, W13, TMP2 - vpshufb W14, W14, TMP2 - vpshufb W15, W15, TMP2 - - // MSG Schedule for W0-W15 is now complete in registers - // Process first 48 rounds - // Calculate next Wt+16 after processing is complete and Wt is unneeded - - PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 2, G, H, A,
B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, 
W2, W7) - PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - - // Check if this is the last block - sub INP_SIZE, 1 - JE lastLoop - - // Load next mask for inputs - ADDQ $8, MASKP_P9 - MOVQ (MASKP_P9), MASK_P9 - - // Process last 16 rounds - // Read in next block msg data for use in first 16 words of msg sched - - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) - PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 - vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - kmovq k1, mask - JMP lloop - -lastLoop: - // Process last 16 rounds - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - 
PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 - vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - // Write out digest - vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A - vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B - vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C - vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D - vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E - vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F - vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G - vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H - - VZEROUPPER - RET - -// -// Tables -// - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go deleted file mode 100644 index db8e48d31..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go +++ /dev/null @@ -1,500 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "encoding/binary" - "errors" - "hash" - "sort" - "sync/atomic" - "time" -) - -//go:noescape -func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte) - -// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to -// differentiate with default initialization value of 0 -const Avx512ServerUID = 16 - -var uidCounter uint64 - -// NewAvx512 - initialize sha256 Avx512 implementation. -func NewAvx512(a512srv *Avx512Server) hash.Hash { - uid := atomic.AddUint64(&uidCounter, 1) - return &Avx512Digest{uid: uid, a512srv: a512srv} -} - -// Avx512Digest - Type for computing SHA256 using Avx512 -type Avx512Digest struct { - uid uint64 - a512srv *Avx512Server - x [chunk]byte - nx int - len uint64 - final bool - result [Size]byte -} - -// Size - Return size of checksum -func (d *Avx512Digest) Size() int { return Size } - -// BlockSize - Return blocksize of checksum -func (d Avx512Digest) BlockSize() int { return BlockSize } - -// Reset - reset sha digest to its initial values -func (d *Avx512Digest) Reset() { - d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} - d.nx = 0 - d.len = 0 - d.final = false -} - -// Write to digest -func (d *Avx512Digest) Write(p []byte) (nn int, err error) { - - if d.final { - return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") - } - - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Sum - Return sha256 sum in bytes -func (d *Avx512Digest) Sum(in []byte) (result []byte) { - - if d.final { - return append(in, d.result[:]...) - } - - trail := make([]byte, 0, 128) - trail = append(trail, d.x[:d.nx]...) - - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - trail = append(trail, tmp[0:56-len%64]...) - } else { - trail = append(trail, tmp[0:64+56-len%64]...) - } - d.nx = 0 - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - trail = append(trail, tmp[0:8]...) - - sumCh := make(chan [Size]byte) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} - d.result = <-sumCh - d.final = true - return append(in, d.result[:]...) -}
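Sum does not hash locally; it synthesizes the standard SHA-256 trailer (a 0x80 byte, zero padding until the total length is 56 mod 64, then the message length in bits as a big-endian 64-bit value) and submits it to the server as a final block. A minimal standalone sketch of that padding rule, using the hypothetical name sha256Trailer:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // sha256Trailer rebuilds the trailer that Sum above sends as its final
    // block: 0x80, zeros until the overall length is 56 mod 64, then the
    // message length in bits, big-endian. msgLen is the total bytes written.
    func sha256Trailer(msgLen uint64) []byte {
    	trail := []byte{0x80}
    	padLen := (56 - (msgLen+1)%64 + 64) % 64
    	trail = append(trail, make([]byte, padLen)...)
    	var lenBits [8]byte
    	binary.BigEndian.PutUint64(lenBits[:], msgLen<<3)
    	return append(trail, lenBits[:]...)
    }

    func main() {
    	// A 3-byte message pads out to one full block: 3 + 1 + 52 + 8 = 64.
    	fmt.Println(3 + len(sha256Trailer(3))) // 64
    }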
-var table = [512]uint64{ - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, - 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
0x4a7484aa4a7484aa, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 
0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, - 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} - -// Interface function to assembly code -func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64)
[16][Size]byte { - - scratch := [512]byte{} - sha256X16Avx512(digests, &scratch, &table, mask, input) - - output := [16][Size]byte{} - for i := 0; i < 16; i++ { - output[i] = getDigest(i, digests[:]) - } - - return output -} - -func getDigest(index int, state []byte) (sum [Size]byte) { - for j := 0; j < 16; j += 2 { - for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { - binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) - } - } - return -} - -// Message to send across input channel -type blockInput struct { - uid uint64 - msg []byte - reset bool - final bool - sumCh chan [Size]byte -} - -// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations -type Avx512Server struct { - blocksCh chan blockInput // Input channel - totalIn int // Total number of inputs waiting to be processed - lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) - digests map[uint64][Size]byte // Map of uids to (interim) digest results -} - -// Avx512LaneInfo - Info for each lane -type Avx512LaneInfo struct { - uid uint64 // unique identification for this SHA processing - block []byte // input block to be processed - outputCh chan [Size]byte // channel for output result -} - -// NewAvx512Server - Create new object for parallel processing handling -func NewAvx512Server() *Avx512Server { - a512srv := &Avx512Server{} - a512srv.digests = make(map[uint64][Size]byte) - a512srv.blocksCh = make(chan blockInput) - - // Start a single thread for reading from the input channel - go a512srv.Process() - return a512srv -} - -// Process - Sole handler for reading from the input channel -func (a512srv *Avx512Server) Process() { - for { - select { - case block := <-a512srv.blocksCh: - if block.reset { - a512srv.reset(block.uid) - continue - } - index := block.uid & 0xf - // fmt.Println("Adding message:", block.uid, index) - - if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs - //fmt.Println("Invoking Blocks()") - a512srv.blocks() - } - a512srv.totalIn++ - a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} - if block.final { - a512srv.lanes[index].outputCh = block.sumCh - } - if a512srv.totalIn == len(a512srv.lanes) { - // fmt.Println("Invoking Blocks() while FULL: ") - a512srv.blocks() - } - - // TODO: test with larger timeout - case <-time.After(1 * time.Microsecond): - for _, lane := range a512srv.lanes { - if lane.block != nil { // check if there is any input to process - // fmt.Println("Invoking Blocks() on TIMEOUT: ") - a512srv.blocks() - break // we are done - } - } - } - } -} - -// Do a reset for this calculation -func (a512srv *Avx512Server) reset(uid uint64) { - - // Check if there is a message still waiting to be processed (and remove if so) - for i, lane := range a512srv.lanes { - if lane.uid == uid { - if lane.block != nil { - a512srv.lanes[i] = Avx512LaneInfo{} // clear message - a512srv.totalIn-- - } - } - } - - // Delete entry from hash map - delete(a512srv.digests, uid) -} - -// Invoke assembly and send results back -func (a512srv *Avx512Server) blocks() { - - inputs := [16][]byte{} - for i := range inputs { - inputs[i] = a512srv.lanes[i].block - } - - mask := expandMask(genMask(inputs)) - outputs := blockAvx512(a512srv.getDigests(), inputs, mask) - - a512srv.totalIn = 0 - for i := 0; i < len(outputs); i++ { - uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh - a512srv.digests[uid] = outputs[i] - a512srv.lanes[i] = Avx512LaneInfo{} - - if outputCh != nil { - // Send back result - outputCh <- outputs[i] - delete(a512srv.digests, uid) // Delete entry from hashmap - } - } -}
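blocks() drives the assembly with one bitmask per 64-byte round, so lanes holding shorter inputs simply drop out of later rounds. The sketch below re-derives what genMask followed by expandMask (both defined further down) compute, assuming every lane length is a multiple of the 64-byte block size; laneMasks is an illustrative name, not part of this package:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // laneMasks returns one active-lane bitmask per 64-byte round, mirroring
    // the combined effect of genMask and expandMask for block-aligned lanes.
    func laneMasks(lens [16]uint) []uint64 {
    	type lane struct{ len, pos uint }
    	sorted := make([]lane, len(lens))
    	for i, l := range lens {
    		sorted[i] = lane{l, uint(i)}
    	}
    	// Shortest lanes first, exactly like the lanes sort helper below.
    	sort.Slice(sorted, func(i, j int) bool { return sorted[i].len < sorted[j].len })

    	var masks []uint64
    	m, round := uint64(0xffff), uint(0)
    	for _, s := range sorted {
    		if s.len > 0 {
    			for ; round < s.len>>6; round++ {
    				masks = append(masks, m) // current mask holds until lane s runs out
    			}
    		}
    		m &^= 1 << s.pos // lane s takes no further part after its own blocks
    	}
    	return masks
    }

    func main() {
    	var lens [16]uint
    	lens[0], lens[1] = 128, 64 // lane 0: two blocks, lane 1: one block
    	fmt.Printf("%#x\n", laneMasks(lens)) // [0x3 0x1]
    }

With lane 0 holding two blocks and lane 1 one block, the first round runs with mask 0x3 and the second with mask 0x1, matching the k1 write-masks applied around the vpaddd digest updates in the assembly.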
- -func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { - a512srv.blocksCh <- blockInput{uid: uid, msg: p} - return len(p), nil -} - -// Sum - return sha256 sum in bytes for a given sum id. -func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { - sumCh := make(chan [32]byte) - a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} - return <-sumCh -} - -func (a512srv *Avx512Server) getDigests() *[512]byte { - digests := [512]byte{} - for i, lane := range a512srv.lanes { - a, ok := a512srv.digests[lane.uid] - if ok { - binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) - binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) - binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12])) - binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16])) - binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20])) - binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24])) - binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28])) - binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32])) - } else { - binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) - binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) - binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) - binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) - binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) - binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) - binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) - binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) - } - } - return &digests -} - -// Helper struct for sorting blocks based on length -type lane struct { - len uint - pos uint -} - -type lanes []lane - -func (lns lanes) Len() int { return len(lns) } -func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } -func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } - -// Helper struct for pairing an active-lane mask with its round count -type maskRounds struct { - mask uint64 - rounds uint64 -} - -func genMask(input [16][]byte) [16]maskRounds { - - // Sort on blocks length small to large - var sorted [16]lane - for c, inpt := range input { - sorted[c] = lane{uint(len(inpt)), uint(c)} - } - sort.Sort(lanes(sorted[:])) - - // Create mask array including 'rounds' between masks - m, round, index := uint64(0xffff), uint64(0), 0 - var mr [16]maskRounds - for _, s := range sorted { - if s.len > 0 { - if uint64(s.len)>>6 > round { - mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} - index++ - } - round = uint64(s.len) >> 6 - } - m = m & ^(1 << uint(s.pos)) - } - - return mr -} - -// TODO: remove function -func expandMask(mr [16]maskRounds) []uint64 { - size := uint64(0) - for _, r := range mr { - size += r.rounds - } - result, index := make([]uint64, size), 0 - for _, r := range mr { - for j := uint64(0); j < r.rounds; j++ { - result[index] = r.mask - index++ - } - } - return result -} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s deleted file mode 100644 index 275bcacbc..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s +++ /dev/null @@ -1,267 +0,0 @@ -//+build !noasm,!appengine - -TEXT
·sha256X16Avx512(SB), 7, $0 - MOVQ digests+0(FP), DI - MOVQ scratch+8(FP), R12 - MOVQ mask_len+32(FP), SI - MOVQ mask_base+24(FP), R13 - MOVQ (R13), R14 - LONG $0x92fbc1c4; BYTE $0xce - LEAQ inputs+48(FP), AX - QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 - MOVQ table+16(FP), DX - WORD $0x3148; BYTE $0xc9 - TESTQ $(1<<0), R14 - JE skipInput0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipInput0: - TESTQ $(1<<1), R14 - JE skipInput1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipInput1: - TESTQ $(1<<2), R14 - JE skipInput2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipInput2: - TESTQ $(1<<3), R14 - JE skipInput3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipInput3: - TESTQ $(1<<4), R14 - JE skipInput4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipInput4: - TESTQ $(1<<5), R14 - JE skipInput5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipInput5: - TESTQ $(1<<6), R14 - JE skipInput6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipInput6: - TESTQ $(1<<7), R14 - JE skipInput7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipInput7: - TESTQ $(1<<8), R14 - JE skipInput8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipInput8: - TESTQ $(1<<9), R14 - JE skipInput9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - -skipInput9: - TESTQ $(1<<10), R14 - JE skipInput10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipInput10: - TESTQ $(1<<11), R14 - JE skipInput11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipInput11: - TESTQ $(1<<12), R14 - JE skipInput12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; WORD $0x2410; BYTE $0x09 - -skipInput12: - TESTQ $(1<<13), R14 - JE skipInput13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipInput13: - TESTQ $(1<<14), R14 - JE skipInput14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipInput14: - TESTQ $(1<<15), R14 - JE skipInput15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipInput15: -lloop: - LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX - LONG $0x487e7162; WORD $0x1a6f - MOVQ table+16(FP), DX - QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 - QUAD $0x2262336f487e6162; 
QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD 
$0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD 
$0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD 
$0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD 
$0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD 
$0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD 
$0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD 
$0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD 
$0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD 
$0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 - JE lastLoop - ADDQ $8, R13 - MOVQ (R13), R14 - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 - TESTQ $(1<<0), R14 - JE skipNext0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipNext0: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 - TESTQ $(1<<1), R14 - JE skipNext1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipNext1: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 - TESTQ $(1<<2), R14 - JE skipNext2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipNext2: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD 
$0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 - TESTQ $(1<<3), R14 - JE skipNext3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipNext3: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 - TESTQ $(1<<4), R14 - JE skipNext4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipNext4: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 - TESTQ $(1<<5), R14 - JE skipNext5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipNext5: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 - TESTQ $(1<<6), R14 - JE skipNext6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipNext6: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 - TESTQ $(1<<7), R14 - JE skipNext7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipNext7: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 - TESTQ $(1<<8), R14 - JE skipNext8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipNext8: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a - TESTQ $(1<<9), R14 - JE skipNext9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - 
-skipNext9: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b - TESTQ $(1<<10), R14 - JE skipNext10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipNext10: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c - TESTQ $(1<<11), R14 - JE skipNext11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipNext11: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d - TESTQ $(1<<12), R14 - JE skipNext12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; WORD $0x2410; BYTE $0x09 - -skipNext12: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e - TESTQ $(1<<13), R14 - JE skipNext13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipNext13: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f - TESTQ $(1<<14), R14 - JE skipNext14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipNext14: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 - TESTQ $(1<<15), R14 - JE skipNext15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipNext15: - QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD 
$0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 - JMP lloop - -lastLoop: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD 
$0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD 
$0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f - VZEROUPPER - RET - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go deleted file mode 100644 index c2f71181f..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sha256 - -//go:noescape -func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s deleted file mode 100644 index 9f444d49f..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s +++ /dev/null @@ -1,408 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for AVX - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -#include "textflag.h" - -#define ROTATE_XS \ - MOVOU X4, X15 \ - MOVOU X5, X4 \ - MOVOU X6, X5 \ - MOVOU X7, X6 \ - MOVOU X15, X7 - -// compute s0 four at a time and s1 two at a time -// compute W[-16] + W[-7] 4 at a time -#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - LONG $0x0f41e3c4; WORD $0x04c6 \ // VPALIGNR XMM0,XMM7,XMM6,0x4 /* XTMP0 = W[-7] */ - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL f, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - XORL g, R15 \ // y2 = f^g - LONG $0xc4fef9c5 \ // VPADDD XMM0,XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) - ANDL e, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - \ - \ // compute s0 - \ - LONG $0x0f51e3c4; WORD $0x04cc \ // VPALIGNR XMM1,XMM5,XMM4,0x4 /* XTMP1 = W[-15] */ - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - \ // ROTATE_ARGS - MOVL a, R15 \ // y2 = a - LONG $0xd172e9c5; BYTE $0x07 \ // VPSRLD XMM2,XMM1,0x7 /* */ - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - LONG $0xf172e1c5; BYTE $0x19 \ // VPSLLD XMM3,XMM1,0x19 /* */ - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - LONG $0xdaebe1c5 \ // VPOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL d, R13 \ // y0 = e - MOVL h, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> 
(25-11) - XORL d, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL e, R15 \ // y2 = f - ROLL $23, R14 \ // y1 = a >> (22-13) - LONG $0xd172e9c5; BYTE $0x12 \ // VPSRLD XMM2,XMM1,0x12 /* */ - XORL h, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL f, R15 \ // y2 = f^g - LONG $0xd172b9c5; BYTE $0x03 \ // VPSRLD XMM8,XMM1,0x3 /* XTMP4 = W[-15] >> 3 */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL d, R15 \ // y2 = (f^g)&e - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xf172f1c5; BYTE $0x0e \ // VPSLLD XMM1,XMM1,0xe /* */ - XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL f, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd9efe1c5 \ // VPXOR XMM3,XMM3,XMM1 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - LONG $0xdaefe1c5 \ // VPXOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ - MOVL h, R13 \ // y0 = a - ADDL R15, g \ // h = h + S1 + CH + k + w - MOVL h, R15 \ // y2 = a - LONG $0xef61c1c4; BYTE $0xc8 \ // VPXOR XMM1,XMM3,XMM8 /* XTMP1 = s0 */ - ORL b, R13 \ // y0 = a|c - ADDL g, c \ // d = d + h + S1 + CH + k + w - ANDL b, R15 \ // y2 = a&c - \ - \ // compute low s1 - \ - LONG $0xd770f9c5; BYTE $0xfa \ // VPSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ - ANDL a, R13 \ // y0 = (a|c)&b - ADDL R14, g \ // h = h + S1 + CH + k + w + S0 - LONG $0xc1fef9c5 \ // VPADDD XMM0,XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL c, R13 \ // y0 = e - MOVL g, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL c, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL d, R15 \ // y2 = f - XORL g, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - LONG $0xd272b9c5; BYTE $0x0a \ // VPSRLD XMM8,XMM2,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ - XORL e, R15 \ // y2 = f^g - LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ - XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL c, R15 \ // y2 = (f^g)&e - LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL e, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xc2ef39c5 \ // VPXOR XMM8,XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ - MOVL g, R13 \ // y0 = a - ADDL R15, f \ // h = h + S1 + CH + k + w - MOVL g, R15 \ // y2 = a - LONG $0x003942c4; BYTE $0xc2 \ // VPSHUFB XMM8,XMM8,XMM10 /* XTMP4 = s1 {00BA} */ - ORL a, R13 \ // y0 = a|c - ADDL f, b \ // d = d + h + S1 + CH + k + w - ANDL a, R15 \ // y2 = a&c - LONG $0xfe79c1c4; BYTE $0xc0 \ // VPADDD XMM0,XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ - ANDL h, R13 \ // y0 = (a|c)&b - ADDL R14, f \ // h = h + S1 + CH + k + w + S0 - \ - \ // compute high s1 - \ - LONG $0xd070f9c5; BYTE $0x50 \ // VPSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // 
ROTATE_ARGS - MOVL b, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL f, R14 \ // y1 = a - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL b, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL c, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - LONG $0xd272a1c5; BYTE $0x0a \ // VPSRLD XMM11,XMM2,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ - XORL f, R14 \ // y1 = a ^ (a >> (22-13) - XORL d, R15 \ // y2 = f^g - LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ - XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL b, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ - XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL d, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xdaef21c5 \ // VPXOR XMM11,XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ - MOVL f, R13 \ // y0 = a - ADDL R15, e \ // h = h + S1 + CH + k + w - MOVL f, R15 \ // y2 = a - LONG $0x002142c4; BYTE $0xdc \ // VPSHUFB XMM11,XMM11,XMM12 /* XTMP5 = s1 {DC00} */ - ORL h, R13 \ // y0 = a|c - ADDL e, a \ // d = d + h + S1 + CH + k + w - ANDL h, R15 \ // y2 = a&c - LONG $0xe0fea1c5 \ // VPADDD XMM4,XMM11,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ - ANDL g, R13 \ // y0 = (a|c)&b - ADDL R14, e \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - ROTATE_XS - -#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL f, R15 \ // y2 = f - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL g, R15 \ // y2 = f^g - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - ANDL e, R15 \ // y2 = (f^g)&e - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - MOVL a, R15 \ // y2 = a - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ - -// func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) -TEXT ·blockAvx(SB), 7, $0-80 - - MOVQ h+0(FP), SI // SI: &h - MOVQ message_base+24(FP), R8 // &message - MOVQ message_len+32(FP), R9 // length of message - CMPQ R9, $0 - JEQ done_hash - ADDQ R8, R9 - MOVQ R9, reserved2+64(FP) // store end of message - - // Register definition - // a --> eax - // b --> ebx - // c --> ecx - // d --> r8d - // e --> edx - // f --> r9d - // g --> r10d - // h --> r11d - // - // y0 --> r13d - // y1 --> r14d - // y2 --> r15d - - MOVL (0*4)(SI), AX // a = H0 - 
MOVL (1*4)(SI), BX // b = H1 - MOVL (2*4)(SI), CX // c = H2 - MOVL (3*4)(SI), R8 // d = H3 - MOVL (4*4)(SI), DX // e = H4 - MOVL (5*4)(SI), R9 // f = H5 - MOVL (6*4)(SI), R10 // g = H6 - MOVL (7*4)(SI), R11 // h = H7 - - MOVOU bflipMask<>(SB), X13 - MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA - MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 - - MOVQ message_base+24(FP), SI // SI: &message - -loop0: - LEAQ constants<>(SB), BP - - // byte swap first 16 dwords - MOVOU 0*16(SI), X4 - LONG $0x0059c2c4; BYTE $0xe5 // VPSHUFB XMM4, XMM4, XMM13 - MOVOU 1*16(SI), X5 - LONG $0x0051c2c4; BYTE $0xed // VPSHUFB XMM5, XMM5, XMM13 - MOVOU 2*16(SI), X6 - LONG $0x0049c2c4; BYTE $0xf5 // VPSHUFB XMM6, XMM6, XMM13 - MOVOU 3*16(SI), X7 - LONG $0x0041c2c4; BYTE $0xfd // VPSHUFB XMM7, XMM7, XMM13 - - MOVQ SI, reserved3+72(FP) - MOVD $0x3, DI - - // schedule 48 input dwords, by doing 3 rounds of 16 each -loop1: - LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - LONG $0x4dfe59c5; BYTE $0x10 // VPADDD XMM9, XMM4, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - LONG $0x4dfe59c5; BYTE $0x20 // VPADDD XMM9, XMM4, 32[RBP] /* Add 3rd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - LONG $0x4dfe59c5; BYTE $0x30 // VPADDD XMM9, XMM4, 48[RBP] /* Add 4th constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $64, BP - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - SUBQ $1, DI - JNE loop1 - - MOVD $0x2, DI - -loop2: - LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) - DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) - DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) - DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) - - LONG $0x4dfe51c5; BYTE $0x10 // VPADDD XMM9, XMM5, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $32, BP - DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) - DO_ROUND( R8, DX, R9, R10, R11, AX, BX, CX, 52) - DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) - DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) - - MOVOU X6, X4 - MOVOU X7, X5 - - SUBQ $1, DI - JNE loop2 - - MOVQ h+0(FP), SI // SI: &h - ADDL (0*4)(SI), AX // H0 = a + H0 - MOVL AX, (0*4)(SI) - ADDL (1*4)(SI), BX // H1 = b + H1 - MOVL BX, (1*4)(SI) - ADDL (2*4)(SI), CX // H2 = c + H2 - MOVL CX, (2*4)(SI) - ADDL (3*4)(SI), R8 // H3 = d + H3 - MOVL R8, (3*4)(SI) - ADDL (4*4)(SI), DX // H4 = e + H4 - MOVL DX, (4*4)(SI) - ADDL (5*4)(SI), R9 // H5 = f + H5 - MOVL R9, (5*4)(SI) - ADDL (6*4)(SI), R10 // H6 = g + H6 - MOVL R10, (6*4)(SI) - ADDL (7*4)(SI), R11 // H7 = h + H7 - MOVL R11, (7*4)(SI) - - MOVQ reserved3+72(FP), SI - ADDQ $64, SI - CMPQ reserved2+64(FP), SI - JNE loop0 - -done_hash: - RET - -// Constants table -DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 -DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b -DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 -DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be -DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA 
constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 -DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 -DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 -DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc -DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 -DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e -DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 -DATA constants<>+0xb8(SB)/8, $0x106aa070f40e3585 -DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c -DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee -DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 -DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa -DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 -DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b - -DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 -DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 - -GLOBL constants<>(SB), 8, $256 -GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 -GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 -GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go deleted file mode 100644 index 483689ef0..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -//+build !noasm,!appengine - -package sha256 - -//go:noescape -func blockSha(h *[8]uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s deleted file mode 100644 index 909fc0ef8..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s +++ /dev/null @@ -1,266 +0,0 @@ -//+build !noasm,!appengine - -// SHA intrinsic version of SHA256 - -// Kristofer Peterson, (C) 2018. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -#include "textflag.h" - -DATA K<>+0x00(SB)/4, $0x428a2f98 -DATA K<>+0x04(SB)/4, $0x71374491 -DATA K<>+0x08(SB)/4, $0xb5c0fbcf -DATA K<>+0x0c(SB)/4, $0xe9b5dba5 -DATA K<>+0x10(SB)/4, $0x3956c25b -DATA K<>+0x14(SB)/4, $0x59f111f1 -DATA K<>+0x18(SB)/4, $0x923f82a4 -DATA K<>+0x1c(SB)/4, $0xab1c5ed5 -DATA K<>+0x20(SB)/4, $0xd807aa98 -DATA K<>+0x24(SB)/4, $0x12835b01 -DATA K<>+0x28(SB)/4, $0x243185be -DATA K<>+0x2c(SB)/4, $0x550c7dc3 -DATA K<>+0x30(SB)/4, $0x72be5d74 -DATA K<>+0x34(SB)/4, $0x80deb1fe -DATA K<>+0x38(SB)/4, $0x9bdc06a7 -DATA K<>+0x3c(SB)/4, $0xc19bf174 -DATA K<>+0x40(SB)/4, $0xe49b69c1 -DATA K<>+0x44(SB)/4, $0xefbe4786 -DATA K<>+0x48(SB)/4, $0x0fc19dc6 -DATA K<>+0x4c(SB)/4, $0x240ca1cc -DATA K<>+0x50(SB)/4, $0x2de92c6f -DATA K<>+0x54(SB)/4, $0x4a7484aa -DATA K<>+0x58(SB)/4, $0x5cb0a9dc -DATA K<>+0x5c(SB)/4, $0x76f988da -DATA K<>+0x60(SB)/4, $0x983e5152 -DATA K<>+0x64(SB)/4, $0xa831c66d -DATA K<>+0x68(SB)/4, $0xb00327c8 -DATA K<>+0x6c(SB)/4, $0xbf597fc7 -DATA K<>+0x70(SB)/4, $0xc6e00bf3 -DATA K<>+0x74(SB)/4, $0xd5a79147 -DATA K<>+0x78(SB)/4, $0x06ca6351 -DATA K<>+0x7c(SB)/4, $0x14292967 -DATA K<>+0x80(SB)/4, $0x27b70a85 -DATA K<>+0x84(SB)/4, $0x2e1b2138 -DATA K<>+0x88(SB)/4, $0x4d2c6dfc -DATA K<>+0x8c(SB)/4, $0x53380d13 -DATA K<>+0x90(SB)/4, $0x650a7354 -DATA K<>+0x94(SB)/4, $0x766a0abb -DATA K<>+0x98(SB)/4, $0x81c2c92e -DATA K<>+0x9c(SB)/4, $0x92722c85 -DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 -DATA K<>+0xa4(SB)/4, $0xa81a664b -DATA K<>+0xa8(SB)/4, $0xc24b8b70 -DATA K<>+0xac(SB)/4, $0xc76c51a3 -DATA K<>+0xb0(SB)/4, $0xd192e819 -DATA K<>+0xb4(SB)/4, $0xd6990624 -DATA K<>+0xb8(SB)/4, $0xf40e3585 -DATA K<>+0xbc(SB)/4, $0x106aa070 -DATA K<>+0xc0(SB)/4, $0x19a4c116 -DATA K<>+0xc4(SB)/4, $0x1e376c08 -DATA K<>+0xc8(SB)/4, $0x2748774c -DATA K<>+0xcc(SB)/4, $0x34b0bcb5 -DATA K<>+0xd0(SB)/4, $0x391c0cb3 -DATA K<>+0xd4(SB)/4, $0x4ed8aa4a -DATA K<>+0xd8(SB)/4, $0x5b9cca4f -DATA K<>+0xdc(SB)/4, $0x682e6ff3 -DATA K<>+0xe0(SB)/4, $0x748f82ee -DATA K<>+0xe4(SB)/4, $0x78a5636f -DATA K<>+0xe8(SB)/4, $0x84c87814 -DATA K<>+0xec(SB)/4, $0x8cc70208 -DATA K<>+0xf0(SB)/4, $0x90befffa -DATA K<>+0xf4(SB)/4, $0xa4506ceb -DATA K<>+0xf8(SB)/4, $0xbef9a3f7 -DATA K<>+0xfc(SB)/4, $0xc67178f2 -GLOBL K<>(SB), RODATA|NOPTR, $256 - -DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 -DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b -GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 - -// Register Usage -// BX base address of constant table (constant) -// DX hash_state (constant) -// SI hash_data.data -// DI hash_data.data + hash_data.length - 64 (constant) -// X0 scratch -// X1 scratch -// X2 working hash state // ABEF -// X3 working hash state // CDGH -// X4 first 16 bytes of block -// X5 second 16 bytes of block -// X6 third 16 bytes of block -// X7 fourth 16 bytes of block -// X12 saved hash state // ABEF -// X13 saved hash state // CDGH -// X15 data shuffle mask (constant) - -TEXT ·blockSha(SB), NOSPLIT, $0-32 - MOVQ h+0(FP), DX - MOVQ message_base+8(FP), SI - MOVQ message_len+16(FP), DI - LEAQ -64(SI)(DI*1), DI - MOVOU (DX), X2 - MOVOU 16(DX), X1 - MOVO X2, X3 - PUNPCKLLQ X1, X2 - PUNPCKHLQ X1, X3 - PSHUFD $0x27, X2, X2 - PSHUFD $0x27, X3, X3 - MOVO SHUF_MASK<>(SB), X15 - LEAQ K<>(SB), BX - - JMP TEST - -LOOP: - MOVO X2, X12 - MOVO X3, X13 - - // load block and shuffle - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOU 32(SI), X6 - MOVOU 48(SI), X7 - PSHUFB X15, X4 - PSHUFB X15, X5 - PSHUFB X15, X6 - PSHUFB X15, X7 - -#define ROUND456 \ - PADDL X5, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 \ - LONG 
$0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 \ - LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - -#define ROUND567 \ - PADDL X6, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 \ - LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 \ - LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - -#define ROUND674 \ - PADDL X7, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X7, X1 \ - LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 - PADDL X1, X4 \ - LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 - -#define ROUND745 \ - PADDL X4, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X4, X1 \ - LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 - PADDL X1, X5 \ - LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 - - // rounds 0-3 - MOVO (BX), X0 - PADDL X4, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 4-7 - MOVO 1*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - - // rounds 8-11 - MOVO 2*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - - MOVO 3*16(BX), X0; ROUND674 // rounds 12-15 - MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 - MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 - MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 - MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 - MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 - MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 - MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 - MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 - MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 - - // rounds 52-55 - MOVO 13*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 - LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 - LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 56-59 - MOVO 14*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 - LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 - LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 60-63 - MOVO 15*16(BX), X0 - PADDL X7, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - PADDL X12, X2 - PADDL X13, X3 - - ADDQ $64, SI - -TEST: - CMPQ SI, DI - JBE LOOP - - PSHUFD $0x4e, X3, X0 - LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 - PSHUFD $0x4e, X2, X1 - LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f - PSHUFD $0x1b, X0, X0 - PSHUFD $0x1b, X1, X1 - - MOVOU X0, (DX) - MOVOU X1, 16(DX) - - RET diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go 
b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go deleted file mode 100644 index 1ae2320bd..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -//go:noescape -func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s deleted file mode 100644 index 7afb45c87..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s +++ /dev/null @@ -1,429 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for SSSE3 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -#include "textflag.h" - -#define ROTATE_XS \ - MOVOU X4, X15 \ - MOVOU X5, X4 \ - MOVOU X6, X5 \ - MOVOU X7, X6 \ - MOVOU X15, X7 - -// compute s0 four at a time and s1 two at a time -// compute W[-16] + W[-7] 4 at a time -#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - MOVOU X7, X0 \ - LONG $0x0f3a0f66; WORD $0x04c6 \ // PALIGNR XMM0,XMM6,0x4 /* XTMP0 = W[-7] */ - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL f, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - XORL g, R15 \ // y2 = f^g - LONG $0xc4fe0f66 \ // PADDD XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) - ANDL e, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - \ - \ // compute s0 - \ - MOVOU X5, X1 \ - LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1,XMM4,0x4 /* XTMP1 = W[-15] */ - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - \ // ROTATE_ARGS - MOVL a, R15 \ // y2 = a - MOVOU X1, X2 \ - LONG $0xd2720f66; BYTE $0x07 \ // PSRLD XMM2,0x7 /* */ - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - MOVOU X1, X3 \ - LONG $0xf3720f66; BYTE $0x19 \ // PSLLD XMM3,0x19 /* */ - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - LONG $0xdaeb0f66 \ // POR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL d, R13 \ // y0 = e - MOVL h, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL d, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL e, R15 \ // y2 = f - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVOU X1, X2 \ - LONG $0xd2720f66; BYTE $0x12 \ // PSRLD XMM2,0x12 /* */ - XORL h, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL f, R15 \ // y2 = f^g - MOVOU X1, X8 \ - LONG $0x720f4166; WORD $0x03d0 \ // PSRLD XMM8,0x3 /* XTMP4 = W[-15] >> 3 */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL d, R15 \ // y2 = (f^g)&e - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xf1720f66; BYTE $0x0e \ // PSLLD XMM1,0xe /* */ - XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL f, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd9ef0f66 \ // PXOR XMM3,XMM1 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - LONG $0xdaef0f66 \ // PXOR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ - MOVL h, R13 \ // y0 = a 
- ADDL R15, g \ // h = h + S1 + CH + k + w - MOVL h, R15 \ // y2 = a - MOVOU X3, X1 \ - LONG $0xef0f4166; BYTE $0xc8 \ // PXOR XMM1,XMM8 /* XTMP1 = s0 */ - ORL b, R13 \ // y0 = a|c - ADDL g, c \ // d = d + h + S1 + CH + k + w - ANDL b, R15 \ // y2 = a&c - \ - \ // compute low s1 - \ - LONG $0xd7700f66; BYTE $0xfa \ // PSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ - ANDL a, R13 \ // y0 = (a|c)&b - ADDL R14, g \ // h = h + S1 + CH + k + w + S0 - LONG $0xc1fe0f66 \ // PADDD XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL c, R13 \ // y0 = e - MOVL g, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL c, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL d, R15 \ // y2 = f - XORL g, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - MOVOU X2, X8 \ - LONG $0x720f4166; WORD $0x0ad0 \ // PSRLD XMM8,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ - XORL e, R15 \ // y2 = f^g - MOVOU X2, X3 \ - LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ - XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL c, R15 \ // y2 = (f^g)&e - LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL e, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xef0f4466; BYTE $0xc2 \ // PXOR XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ - MOVL g, R13 \ // y0 = a - ADDL R15, f \ // h = h + S1 + CH + k + w - MOVL g, R15 \ // y2 = a - LONG $0x380f4566; WORD $0xc200 \ // PSHUFB XMM8,XMM10 /* XTMP4 = s1 {00BA} */ - ORL a, R13 \ // y0 = a|c - ADDL f, b \ // d = d + h + S1 + CH + k + w - ANDL a, R15 \ // y2 = a&c - LONG $0xfe0f4166; BYTE $0xc0 \ // PADDD XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ - ANDL h, R13 \ // y0 = (a|c)&b - ADDL R14, f \ // h = h + S1 + CH + k + w + S0 - \ - \ // compute high s1 - \ - LONG $0xd0700f66; BYTE $0x50 \ // PSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL b, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL f, R14 \ // y1 = a - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL b, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL c, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - MOVOU X2, X11 \ - LONG $0x720f4166; WORD $0x0ad3 \ // PSRLD XMM11,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ - XORL f, R14 \ // y1 = a ^ (a >> (22-13) - XORL d, R15 \ // y2 = f^g - MOVOU X2, X3 \ - LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ - XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL b, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ - XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL d, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, 
R15 \ // y2 = S1 + CH - ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xef0f4466; BYTE $0xda \ // PXOR XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ - MOVL f, R13 \ // y0 = a - ADDL R15, e \ // h = h + S1 + CH + k + w - MOVL f, R15 \ // y2 = a - LONG $0x380f4566; WORD $0xdc00 \ // PSHUFB XMM11,XMM12 /* XTMP5 = s1 {DC00} */ - ORL h, R13 \ // y0 = a|c - ADDL e, a \ // d = d + h + S1 + CH + k + w - ANDL h, R15 \ // y2 = a&c - MOVOU X11, X4 \ - LONG $0xe0fe0f66 \ // PADDD XMM4,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ - ANDL g, R13 \ // y0 = (a|c)&b - ADDL R14, e \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - ROTATE_XS - -#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL f, R15 \ // y2 = f - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL g, R15 \ // y2 = f^g - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - ANDL e, R15 \ // y2 = (f^g)&e - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - MOVL a, R15 \ // y2 = a - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ - -// func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) -TEXT ·blockSsse(SB), 7, $0-80 - - MOVQ h+0(FP), SI // SI: &h - MOVQ message_base+24(FP), R8 // &message - MOVQ message_len+32(FP), R9 // length of message - CMPQ R9, $0 - JEQ done_hash - ADDQ R8, R9 - MOVQ R9, reserved2+64(FP) // store end of message - - // Register definition - // a --> eax - // b --> ebx - // c --> ecx - // d --> r8d - // e --> edx - // f --> r9d - // g --> r10d - // h --> r11d - // - // y0 --> r13d - // y1 --> r14d - // y2 --> r15d - - MOVL (0*4)(SI), AX // a = H0 - MOVL (1*4)(SI), BX // b = H1 - MOVL (2*4)(SI), CX // c = H2 - MOVL (3*4)(SI), R8 // d = H3 - MOVL (4*4)(SI), DX // e = H4 - MOVL (5*4)(SI), R9 // f = H5 - MOVL (6*4)(SI), R10 // g = H6 - MOVL (7*4)(SI), R11 // h = H7 - - MOVOU bflipMask<>(SB), X13 - MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA - MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 - - MOVQ message_base+24(FP), SI // SI: &message - -loop0: - LEAQ constants<>(SB), BP - - // byte swap first 16 dwords - MOVOU 0*16(SI), X4 - LONG $0x380f4166; WORD $0xe500 // PSHUFB XMM4, XMM13 - MOVOU 1*16(SI), X5 - LONG $0x380f4166; WORD $0xed00 // PSHUFB XMM5, XMM13 - MOVOU 2*16(SI), X6 - LONG $0x380f4166; WORD $0xf500 // PSHUFB XMM6, XMM13 - MOVOU 3*16(SI), X7 - LONG $0x380f4166; WORD $0xfd00 // PSHUFB XMM7, XMM13 - - MOVQ SI, reserved3+72(FP) - MOVD $0x3, DI - - // Align - // nop WORD PTR [rax+rax*1+0x0] - - // schedule 48 input dwords, by doing 3 rounds of 16 each -loop1: - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant 
to first part of message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x204d // PADDD XMM9, 32[RBP] /* Add 3rd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x304d // PADDD XMM9, 48[RBP] /* Add 4th constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $64, BP - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - SUBQ $1, DI - JNE loop1 - - MOVD $0x2, DI - -loop2: - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) - DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) - DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) - DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) - - MOVOU X5, X9 - LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $32, BP - DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) - DO_ROUND( R8, DX, R9, R10, R11, AX, BX, CX, 52) - DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) - DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) - - MOVOU X6, X4 - MOVOU X7, X5 - - SUBQ $1, DI - JNE loop2 - - MOVQ h+0(FP), SI // SI: &h - ADDL (0*4)(SI), AX // H0 = a + H0 - MOVL AX, (0*4)(SI) - ADDL (1*4)(SI), BX // H1 = b + H1 - MOVL BX, (1*4)(SI) - ADDL (2*4)(SI), CX // H2 = c + H2 - MOVL CX, (2*4)(SI) - ADDL (3*4)(SI), R8 // H3 = d + H3 - MOVL R8, (3*4)(SI) - ADDL (4*4)(SI), DX // H4 = e + H4 - MOVL DX, (4*4)(SI) - ADDL (5*4)(SI), R9 // H5 = f + H5 - MOVL R9, (5*4)(SI) - ADDL (6*4)(SI), R10 // H6 = g + H6 - MOVL R10, (6*4)(SI) - ADDL (7*4)(SI), R11 // H7 = h + H7 - MOVL R11, (7*4)(SI) - - MOVQ reserved3+72(FP), SI - ADDQ $64, SI - CMPQ reserved2+64(FP), SI - JNE loop0 - -done_hash: - RET - -// Constants table -DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 -DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b -DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 -DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be -DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 -DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 -DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 -DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc -DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 -DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e -DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 -DATA constants<>+0xb8(SB)/8, $0x106aa070f40e3585 -DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c -DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA 
constants<>+0xe0(SB)/8, $0x78a5636f748f82ee -DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 -DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa -DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 -DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b - -DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 -DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 - -GLOBL constants<>(SB), 8, $256 -GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 -GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 -GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go deleted file mode 100644 index 1c4d97f0c..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go +++ /dev/null @@ -1,53 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockArmGo(dig *digest, p []byte) {} - -func blockAvxGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockAvx(h[:], p[:], 0, 0, 0, 0) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockAvx2Go(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockAvx2(h[:], p[:]) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockSsseGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockSsse(h[:], p[:], 0, 0, 0, 0) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockShaGo(dig *digest, p []byte) { - - blockSha(&dig.h, p) -} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go deleted file mode 100644 index 0979c20ae..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go +++ /dev/null @@ -1,37 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockAvx2Go(dig *digest, p []byte) {} -func blockAvxGo(dig *digest, p []byte) {} -func blockSsseGo(dig *digest, p []byte) {} -func blockShaGo(dig *digest, p []byte) {} - -//go:noescape -func blockArm(h []uint32, message []uint8) - -func blockArmGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockArm(h[:], p[:]) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], - h[5], h[6], h[7] -} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s deleted file mode 100644 index c6ddb3717..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s +++ /dev/null @@ -1,192 +0,0 @@ -//+build !noasm,!appengine - -// ARM64 version of SHA256 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// -// Based on implementation as found in https://github.com/jocover/sha256-armv8 -// -// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to -// their Plan9 equivalents -// - -TEXT ·blockArm(SB), 7, $0 - MOVD h+0(FP), R0 - MOVD message+24(FP), R1 - MOVD message_len+32(FP), R2 // length of message - SUBS $64, R2 - BMI complete - - // Load constants table pointer - MOVD $·constants(SB), R3 - - // Cache constants table in registers v16 - v31 - WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 - WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 - WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 - - WORD $0x4c407801 // ld1 {v1.4s}, [x0] - WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 - WORD $0xd1004000 // sub x0, x0, #0x10 - WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 - -loop: - // Main loop - WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 - WORD $0x4ea01c02 // mov v2.16b, v0.16b - WORD $0x4ea11c23 // mov v3.16b, v1.16b - WORD $0x6e2008a5 // rev32 v5.16b, v5.16b - WORD $0x6e2008c6 // rev32 v6.16b, v6.16b - WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s - WORD $0x6e2008e7 // rev32 v7.16b, v7.16b - WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x6e200908 // rev32 v8.16b, v8.16b - WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 
// sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s - WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s - - SUBS $64, R2 - BPL loop - - // Store result - WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] - -complete: - RET - -// Constants table -DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 -DATA ·constants+0x8(SB)/8, 
$0xe9b5dba5b5c0fbcf -DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b -DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 -DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be -DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152 -DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 -DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 -DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc -DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 -DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e -DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 -DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 -DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c -DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee -DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 -DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa -DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -GLOBL ·constants(SB), 8, $256 - diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go deleted file mode 100644 index 0187c950a..000000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_other.go +++ /dev/null @@ -1,25 +0,0 @@ -//+build appengine noasm !amd64,!arm64 - -/* - * Minio Cloud Storage, (C) 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockAvx2Go(dig *digest, p []byte) {} -func blockAvxGo(dig *digest, p []byte) {} -func blockSsseGo(dig *digest, p []byte) {} -func blockShaGo(dig *digest, p []byte) {} -func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh deleted file mode 100644 index 50150eaab..000000000 --- a/vendor/github.com/minio/sha256-simd/test-architectures.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -e - -go tool dist list | while IFS=/ read os arch; do - echo "Checking $os/$arch..." - echo " normal" - GOARCH=$arch GOOS=$os go build -o /dev/null ./... - echo " noasm" - GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./... - echo " appengine" - GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./... - echo " noasm,appengine" - GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./... 
-done diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b..000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod deleted file mode 100644 index 7efa09a04..000000000 --- a/vendor/github.com/mitchellh/go-homedir/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537e..000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. 
-func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/ncw/swift/.gitignore deleted file mode 100644 index 5cdbab794..000000000 --- a/vendor/github.com/ncw/swift/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*~ -*.pyc -test-env* -junk/ \ No newline at end of file diff --git a/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/ncw/swift/.travis.yml deleted file mode 100644 index e0a61643b..000000000 --- a/vendor/github.com/ncw/swift/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go -sudo: false - -go: - - 1.2.x - - 1.3.x - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - master - -matrix: - include: - - go: 1.12.x - env: TEST_REAL_SERVER=rackspace - - go: 1.12.x - env: TEST_REAL_SERVER=memset - allow_failures: - - go: 1.12.x - env: TEST_REAL_SERVER=rackspace - - go: 1.12.x - env: TEST_REAL_SERVER=memset -install: go test -i ./... -script: - - test -z "$(go fmt ./...)" - - go test - - ./travis_realserver.sh diff --git a/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/ncw/swift/COPYING deleted file mode 100644 index 8c27c67fd..000000000 --- a/vendor/github.com/ncw/swift/COPYING +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
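The go-homedir package whose vendored copy is deleted above remains a module dependency of the project; only the `vendor/` copy goes away. As a minimal sketch of the API that homedir.go exposes (the import path is the one from the removed files; the `~/.restic` path is a made-up example, not taken from this patch):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Dir detects the home directory without cgo and caches the
	// result (see homedirCache in the homedir.go removed above).
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand rewrites a leading "~" using the same detection logic;
	// paths without the prefix are returned unchanged.
	path, err := homedir.Expand("~/.restic")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", path)
}
```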
- diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md deleted file mode 100644 index 0a09293d8..000000000 --- a/vendor/github.com/ncw/swift/README.md +++ /dev/null @@ -1,156 +0,0 @@ -Swift -===== - -This package provides an easy-to-use library for interfacing with -Swift / Openstack Object Storage / Rackspace cloud files from the Go -Language - -See here for package docs - - http://godoc.org/github.com/ncw/swift - -[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift) - -Install ------- - -Use go to install the library - - go get github.com/ncw/swift - -Usage ----- - -See here for full package docs - -- http://godoc.org/github.com/ncw/swift - -Here is a short example from the docs -```go -import "github.com/ncw/swift" - -// Create a connection -c := swift.Connection{ - UserName: "user", - ApiKey: "key", - AuthUrl: "auth_url", - Domain: "domain", // Name of the domain (v3 auth only) - Tenant: "tenant", // Name of the tenant (v2 auth only) -} -// Authenticate -err := c.Authenticate() -if err != nil { - panic(err) -} -// List all the containers -containers, err := c.ContainerNames(nil) -fmt.Println(containers) -// etc... -``` - -Additions --------- - -The `rs` sub-project contains a wrapper for the Rackspace-specific CDN Management interface. - -Testing ------- - -To run the tests you can use either an embedded fake Swift server, -a real Openstack Swift server, or a Rackspace Cloud files account. - -When using a real Swift server, you need to set these environment variables -before running the tests - - export SWIFT_API_USER='user' - export SWIFT_API_KEY='key' - export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' - -And optionally these if using v2 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - -And optionally these if using v3 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - export SWIFT_API_DOMAIN_ID='domain id' - export SWIFT_API_DOMAIN='domain name' - -And optionally these if using v3 trust - - export SWIFT_TRUST_ID='TrustId' - -And optionally this if you want to skip server certificate validation - - export SWIFT_AUTH_INSECURE=1 - -And optionally this to configure the connect channel timeout, in seconds - - export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 - -And optionally this to configure the data channel timeout, in seconds - - export SWIFT_DATA_CHANNEL_TIMEOUT=60 - -Then run the tests with `go test` - -License ------- - -This is free software under the terms of the MIT license (check the COPYING file -included in this package). - -Contact and support ------------------- - -The project website is at: - -- https://github.com/ncw/swift - -There you can file bug reports, ask for help or contribute patches.
- -Authors -------- - -- Nick Craig-Wood - -Contributors ------------- - -- Brian "bojo" Jones -- Janika Liiv -- Yamamoto, Hirotaka -- Stephen -- platformpurple -- Paul Querna -- Livio Soares -- thesyncim -- lsowen -- Sylvain Baubeau -- Chris Kastorff -- Dai HaoJun -- Hua Wang -- Fabian Ruff -- Arturo Reuschenbach Puncernau -- Petr Kotek -- Stefan Majewsky -- Cezar Sa Espinola -- Sam Gunaratne -- Richard Scothern -- Michel Couillard -- Christopher Waldon -- dennis -- hag -- Alexander Neumann -- eclipseo <30413512+eclipseo@users.noreply.github.com> -- Yuri Per -- Falk Reimann -- Arthur Paim Arnold -- Bruno Michel -- Charles Hsu -- Omar Ali -- Andreas Andersen -- kayrus -- CodeLingo Bot diff --git a/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/ncw/swift/auth.go deleted file mode 100644 index 25654f429..000000000 --- a/vendor/github.com/ncw/swift/auth.go +++ /dev/null @@ -1,335 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "strings" - "time" -) - -// Auth defines the operations needed to authenticate with swift -// -// This encapsulates the different authentication schemes in use -type Authenticator interface { - // Request creates an http.Request for the auth - return nil if not needed - Request(*Connection) (*http.Request, error) - // Response parses the http.Response - Response(resp *http.Response) error - // The public storage URL - set Internal to true to read - // internal/service net URL - StorageUrl(Internal bool) string - // The access token - Token() string - // The CDN url if available - CdnUrl() string -} - -// Expireser is an optional interface to read the expiration time of the token -type Expireser interface { - Expires() time.Time -} - -type CustomEndpointAuthenticator interface { - StorageUrlForEndpoint(endpointType EndpointType) string -} - -type EndpointType string - -const ( - // Use public URL as storage URL - EndpointTypePublic = EndpointType("public") - - // Use internal URL as storage URL - EndpointTypeInternal = EndpointType("internal") - - // Use admin URL as storage URL - EndpointTypeAdmin = EndpointType("admin") -) - -// newAuth - create a new Authenticator from the AuthUrl -// -// A hint for AuthVersion can be provided -func newAuth(c *Connection) (Authenticator, error) { - AuthVersion := c.AuthVersion - if AuthVersion == 0 { - if strings.Contains(c.AuthUrl, "v3") { - AuthVersion = 3 - } else if strings.Contains(c.AuthUrl, "v2") { - AuthVersion = 2 - } else if strings.Contains(c.AuthUrl, "v1") { - AuthVersion = 1 - } else { - return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") - } - } - switch AuthVersion { - case 1: - return &v1Auth{}, nil - case 2: - return &v2Auth{ - // Guess as to whether using API key or - // password it will try both eventually so - // this is just an optimization. 
- useApiKey: len(c.ApiKey) >= 32, - }, nil - case 3: - return &v3Auth{}, nil - } - return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) -} - -// ------------------------------------------------------------ - -// v1 auth -type v1Auth struct { - Headers http.Header // V1 auth: the authentication headers so extensions can access them -} - -// v1 Authentication - make request -func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { - req, err := http.NewRequest("GET", c.AuthUrl, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.UserAgent) - req.Header.Set("X-Auth-Key", c.ApiKey) - req.Header.Set("X-Auth-User", c.UserName) - return req, nil -} - -// v1 Authentication - read response -func (auth *v1Auth) Response(resp *http.Response) error { - auth.Headers = resp.Header - return nil -} - -// v1 Authentication - read storage url -func (auth *v1Auth) StorageUrl(Internal bool) string { - storageUrl := auth.Headers.Get("X-Storage-Url") - if Internal { - newUrl, err := url.Parse(storageUrl) - if err != nil { - return storageUrl - } - newUrl.Host = "snet-" + newUrl.Host - storageUrl = newUrl.String() - } - return storageUrl -} - -// v1 Authentication - read auth token -func (auth *v1Auth) Token() string { - return auth.Headers.Get("X-Auth-Token") -} - -// v1 Authentication - read cdn url -func (auth *v1Auth) CdnUrl() string { - return auth.Headers.Get("X-CDN-Management-Url") -} - -// ------------------------------------------------------------ - -// v2 Authentication -type v2Auth struct { - Auth *v2AuthResponse - Region string - useApiKey bool // if set will use API key not Password - useApiKeyOk bool // if set won't change useApiKey any more - notFirst bool // set after first run -} - -// v2 Authentication - make request -func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { - auth.Region = c.Region - // Toggle useApiKey if not first run and not OK yet - if auth.notFirst && !auth.useApiKeyOk { - auth.useApiKey = !auth.useApiKey - } - auth.notFirst = true - // Create a V2 auth request for the body of the connection - var v2i interface{} - if !auth.useApiKey { - // Normal swift authentication - v2 := v2AuthRequest{} - v2.Auth.PasswordCredentials.UserName = c.UserName - v2.Auth.PasswordCredentials.Password = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } else { - // Rackspace special with API Key - v2 := v2AuthRequestRackspace{} - v2.Auth.ApiKeyCredentials.UserName = c.UserName - v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } - body, err := json.Marshal(v2i) - if err != nil { - return nil, err - } - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", c.UserAgent) - return req, nil -} - -// v2 Authentication - read response -func (auth *v2Auth) Response(resp *http.Response) error { - auth.Auth = new(v2AuthResponse) - err := readJson(resp, auth.Auth) - // If successfully read Auth then no need to toggle useApiKey any more - if err == nil { - auth.useApiKeyOk = true - } - return err -} - -// Finds the Endpoint Url of "type" from the v2AuthResponse using the -// Region if set or defaulting to the first one if not -// -// Returns "" if not found -func (auth *v2Auth) endpointUrl(Type string, endpointType 
EndpointType) string { - for _, catalog := range auth.Auth.Access.ServiceCatalog { - if catalog.Type == Type { - for _, endpoint := range catalog.Endpoints { - if auth.Region == "" || (auth.Region == endpoint.Region) { - switch endpointType { - case EndpointTypeInternal: - return endpoint.InternalUrl - case EndpointTypePublic: - return endpoint.PublicUrl - case EndpointTypeAdmin: - return endpoint.AdminUrl - default: - return "" - } - } - } - } - } - return "" -} - -// v2 Authentication - read storage url -// -// If Internal is true then it reads the private (internal / service -// net) URL. -func (auth *v2Auth) StorageUrl(Internal bool) string { - endpointType := EndpointTypePublic - if Internal { - endpointType = EndpointTypeInternal - } - return auth.StorageUrlForEndpoint(endpointType) -} - -// v2 Authentication - read storage url -// -// Use the indicated endpointType to choose a URL. -func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string { - return auth.endpointUrl("object-store", endpointType) -} - -// v2 Authentication - read auth token -func (auth *v2Auth) Token() string { - return auth.Auth.Access.Token.Id -} - -// v2 Authentication - read expires -func (auth *v2Auth) Expires() time.Time { - t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires) - if err != nil { - return time.Time{} // return Zero if not parsed - } - return t -} - -// v2 Authentication - read cdn url -func (auth *v2Auth) CdnUrl() string { - return auth.endpointUrl("rax:object-cdn", EndpointTypePublic) -} - -// ------------------------------------------------------------ - -// V2 Authentication request -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequest struct { - Auth struct { - PasswordCredentials struct { - UserName string `json:"username"` - Password string `json:"password"` - } `json:"passwordCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication request - Rackspace variant -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequestRackspace struct { - Auth struct { - ApiKeyCredentials struct { - UserName string `json:"username"` - ApiKey string `json:"apiKey"` - } `json:"RAX-KSKEY:apiKeyCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication reply -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthResponse struct { - Access struct { - ServiceCatalog []struct { - Endpoints []struct { - InternalUrl string - PublicUrl string - AdminUrl string - Region string - TenantId string - } - Name string - Type string - } - Token struct { - Expires string - Id string - Tenant struct { - Id string - Name string - } - } - User struct { - DefaultRegion string `json:"RAX-AUTH:defaultRegion"` - Id string - Name string 
- Roles []struct { - Description string - Id string - Name string - TenantId string - } - } - } -} diff --git a/vendor/github.com/ncw/swift/auth_v3.go b/vendor/github.com/ncw/swift/auth_v3.go deleted file mode 100644 index 1e34ad814..000000000 --- a/vendor/github.com/ncw/swift/auth_v3.go +++ /dev/null @@ -1,300 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" -) - -const ( - v3AuthMethodToken = "token" - v3AuthMethodPassword = "password" - v3AuthMethodApplicationCredential = "application_credential" - v3CatalogTypeObjectStore = "object-store" -) - -// V3 Authentication request -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://developer.openstack.org/api-ref-identity-v3.html -type v3AuthRequest struct { - Auth struct { - Identity struct { - Methods []string `json:"methods"` - Password *v3AuthPassword `json:"password,omitempty"` - Token *v3AuthToken `json:"token,omitempty"` - ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"` - } `json:"identity"` - Scope *v3Scope `json:"scope,omitempty"` - } `json:"auth"` -} - -type v3Scope struct { - Project *v3Project `json:"project,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` - Trust *v3Trust `json:"OS-TRUST:trust,omitempty"` -} - -type v3Domain struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type v3Project struct { - Name string `json:"name,omitempty"` - Id string `json:"id,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` -} - -type v3Trust struct { - Id string `json:"id"` -} - -type v3User struct { - Domain *v3Domain `json:"domain,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Password string `json:"password,omitempty"` -} - -type v3AuthToken struct { - Id string `json:"id"` -} - -type v3AuthPassword struct { - User v3User `json:"user"` -} - -type v3AuthApplicationCredential struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Secret string `json:"secret,omitempty"` - User *v3User `json:"user,omitempty"` -} - -// V3 Authentication response -type v3AuthResponse struct { - Token struct { - ExpiresAt string `json:"expires_at"` - IssuedAt string `json:"issued_at"` - Methods []string - Roles []struct { - Id, Name string - Links struct { - Self string - } - } - - Project struct { - Domain struct { - Id, Name string - } - Id, Name string - } - - Catalog []struct { - Id, Namem, Type string - Endpoints []struct { - Id, Region_Id, Url, Region string - Interface EndpointType - } - } - - User struct { - Id, Name string - Domain struct { - Id, Name string - Links struct { - Self string - } - } - } - - Audit_Ids []string - } -} - -type v3Auth struct { - Region string - Auth *v3AuthResponse - Headers http.Header -} - -func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { - auth.Region = c.Region - - var v3i interface{} - - v3 := v3AuthRequest{} - - if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" { - var user *v3User - - if c.ApplicationCredentialId != "" { - c.ApplicationCredentialName = "" - user = &v3User{} - } - - if user == nil && c.UserId != "" { - // UserID could be used without the domain information - user = &v3User{ - Id: c.UserId, - } - } - - if user == nil && c.UserName == "" { - // Make sure that Username or UserID are provided - return nil, fmt.Errorf("UserID or Name should be provided") - } - - if user == nil && 
c.DomainId != "" { - user = &v3User{ - Name: c.UserName, - Domain: &v3Domain{ - Id: c.DomainId, - }, - } - } - - if user == nil && c.Domain != "" { - user = &v3User{ - Name: c.UserName, - Domain: &v3Domain{ - Name: c.Domain, - }, - } - } - - // Make sure that DomainID or DomainName are provided among Username - if user == nil { - return nil, fmt.Errorf("DomainID or Domain should be provided") - } - - v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential} - v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{ - Id: c.ApplicationCredentialId, - Name: c.ApplicationCredentialName, - Secret: c.ApplicationCredentialSecret, - User: user, - } - } else if c.UserName == "" && c.UserId == "" { - v3.Auth.Identity.Methods = []string{v3AuthMethodToken} - v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} - } else { - v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} - v3.Auth.Identity.Password = &v3AuthPassword{ - User: v3User{ - Name: c.UserName, - Id: c.UserId, - Password: c.ApiKey, - }, - } - - var domain *v3Domain - - if c.Domain != "" { - domain = &v3Domain{Name: c.Domain} - } else if c.DomainId != "" { - domain = &v3Domain{Id: c.DomainId} - } - v3.Auth.Identity.Password.User.Domain = domain - } - - if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential { - if c.TrustId != "" { - v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}} - } else if c.TenantId != "" || c.Tenant != "" { - - v3.Auth.Scope = &v3Scope{Project: &v3Project{}} - - if c.TenantId != "" { - v3.Auth.Scope.Project.Id = c.TenantId - } else if c.Tenant != "" { - v3.Auth.Scope.Project.Name = c.Tenant - switch { - case c.TenantDomain != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain} - case c.TenantDomainId != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId} - case c.Domain != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain} - case c.DomainId != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId} - default: - v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"} - } - } - } - } - - v3i = v3 - - body, err := json.Marshal(v3i) - - if err != nil { - return nil, err - } - - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "auth/tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", c.UserAgent) - return req, nil -} - -func (auth *v3Auth) Response(resp *http.Response) error { - auth.Auth = &v3AuthResponse{} - auth.Headers = resp.Header - err := readJson(resp, auth.Auth) - return err -} - -func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string { - for _, catalog := range auth.Auth.Token.Catalog { - if catalog.Type == Type { - for _, endpoint := range catalog.Endpoints { - if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) { - return endpoint.Url - } - } - } - } - return "" -} - -func (auth *v3Auth) StorageUrl(Internal bool) string { - endpointType := EndpointTypePublic - if Internal { - endpointType = EndpointTypeInternal - } - return auth.StorageUrlForEndpoint(endpointType) -} - -func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string { - return auth.endpointUrl("object-store", endpointType) -} - -func (auth *v3Auth) Token() string { - return auth.Headers.Get("X-Subject-Token") -} - -func (auth *v3Auth) Expires() time.Time { - t, err := 
time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt) - if err != nil { - return time.Time{} // return Zero if not parsed - } - return t -} - -func (auth *v3Auth) CdnUrl() string { - return "" -} diff --git a/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/ncw/swift/compatibility_1_0.go deleted file mode 100644 index 7b69a757a..000000000 --- a/vendor/github.com/ncw/swift/compatibility_1_0.go +++ /dev/null @@ -1,28 +0,0 @@ -// Go 1.0 compatibility functions - -// +build !go1.1 - -package swift - -import ( - "log" - "net/http" - "time" -) - -// Cancel the request - doesn't work under < go 1.1 -func cancelRequest(transport http.RoundTripper, req *http.Request) { - log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") -} - -// Reset a timer - Doesn't work properly < go 1.1 -// -// This is quite hard to do properly under go < 1.1 so we do a crude -// approximation and hope that everyone upgrades to go 1.1 quickly -func resetTimer(t *time.Timer, d time.Duration) { - t.Stop() - // Very likely this doesn't actually work if we are already - // selecting on t.C. However we've stopped the original timer - // so won't break transfers but may not time them out :-( - *t = *time.NewTimer(d) -} diff --git a/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/ncw/swift/compatibility_1_1.go deleted file mode 100644 index a4f9c3ab2..000000000 --- a/vendor/github.com/ncw/swift/compatibility_1_1.go +++ /dev/null @@ -1,24 +0,0 @@ -// Go 1.1 and later compatibility functions -// -// +build go1.1 - -package swift - -import ( - "net/http" - "time" -) - -// Cancel the request -func cancelRequest(transport http.RoundTripper, req *http.Request) { - if tr, ok := transport.(interface { - CancelRequest(*http.Request) - }); ok { - tr.CancelRequest(req) - } -} - -// Reset a timer -func resetTimer(t *time.Timer, d time.Duration) { - t.Reset(d) -} diff --git a/vendor/github.com/ncw/swift/compatibility_1_6.go b/vendor/github.com/ncw/swift/compatibility_1_6.go deleted file mode 100644 index b443d01d2..000000000 --- a/vendor/github.com/ncw/swift/compatibility_1_6.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build go1.6 - -package swift - -import ( - "net/http" - "time" -) - -const IS_AT_LEAST_GO_16 = true - -func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) { - tr.ExpectContinueTimeout = t -} - -func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) { - if req.Body != nil { - req.Header.Add("Expect", "100-continue") - } - if !hasContentLength { - req.TransferEncoding = []string{"chunked"} - } -} diff --git a/vendor/github.com/ncw/swift/compatibility_not_1_6.go b/vendor/github.com/ncw/swift/compatibility_not_1_6.go deleted file mode 100644 index aabb44e2b..000000000 --- a/vendor/github.com/ncw/swift/compatibility_not_1_6.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.6 - -package swift - -import ( - "net/http" - "time" -) - -const IS_AT_LEAST_GO_16 = false - -func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {} -func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {} diff --git a/vendor/github.com/ncw/swift/dlo.go b/vendor/github.com/ncw/swift/dlo.go deleted file mode 100644 index e2e2aa97e..000000000 --- a/vendor/github.com/ncw/swift/dlo.go +++ /dev/null @@ -1,136 +0,0 @@ -package swift - -import ( - "os" -) - -// DynamicLargeObjectCreateFile represents an open static large object -type DynamicLargeObjectCreateFile struct { - largeObjectCreateFile -} - -// DynamicLargeObjectCreateFile creates a 
dynamic large object -// returning an object which satisfies io.Writer, io.Seeker, io.Closer -// and io.ReaderFrom. The flags are as passes to the -// largeObjectCreate method. -func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { - lo, err := c.largeObjectCreate(opts) - if err != nil { - return nil, err - } - - return withBuffer(opts, &DynamicLargeObjectCreateFile{ - largeObjectCreateFile: *lo, - }), nil -} - -// DynamicLargeObjectCreate creates or truncates an existing dynamic -// large object returning a writeable object. This sets opts.Flags to -// an appropriate value before calling DynamicLargeObjectCreateFile -func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { - opts.Flags = os.O_TRUNC | os.O_CREATE - return c.DynamicLargeObjectCreateFile(opts) -} - -// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments. -func (c *Connection) DynamicLargeObjectDelete(container string, path string) error { - return c.LargeObjectDelete(container, path) -} - -// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName -func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { - info, headers, err := c.Object(dstContainer, srcObjectName) - if err != nil { - return err - } - - segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"]) - if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil { - return err - } - - if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { - return err - } - - return nil -} - -// createDLOManifest creates a dynamic large object manifest -func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error { - headers := make(Headers) - headers["X-Object-Manifest"] = prefix - manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers) - if err != nil { - return err - } - - if err := manifest.Close(); err != nil { - return err - } - - return nil -} - -// Close satisfies the io.Closer interface -func (file *DynamicLargeObjectCreateFile) Close() error { - return file.Flush() -} - -func (file *DynamicLargeObjectCreateFile) Flush() error { - err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType) - if err != nil { - return err - } - return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) -} - -func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) { - //a simple container listing works 99.9% of the time - segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath}) - if err != nil { - return nil, err - } - - hasObjectName := make(map[string]struct{}) - for _, segment := range segments { - hasObjectName[segment.Name] = struct{}{} - } - - //The container listing might be outdated (i.e. not contain all existing - //segment objects yet) because of temporary inconsistency (Swift is only - //eventually consistent!). Check its completeness. - segmentNumber := 0 - for { - segmentNumber++ - segmentName := getSegment(segmentPath, segmentNumber) - if _, seen := hasObjectName[segmentName]; seen { - continue - } - - //This segment is missing in the container listing. 
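As an aside on the `createDLOManifest` helper above: a DLO is nothing more than an empty object carrying an `X-Object-Manifest` header. A minimal sketch of doing the same by hand (container and object names are invented; assumes `import "github.com/ncw/swift"`):

```go
// Hypothetical helper mirroring createDLOManifest above. Swift then
// serves the concatenation of all objects under photos_segments/big/
// in lexicographic name order.
func putManifest(c *swift.Connection) error {
	headers := swift.Headers{"X-Object-Manifest": "photos_segments/big/"}
	f, err := c.ObjectCreate("photos", "big", false, "", "image/jpeg", headers)
	if err != nil {
		return err
	}
	return f.Close() // the manifest object itself has an empty body
}
```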
Use a more reliable - //request to check its existence. (HEAD requests on segments are - //guaranteed to return the correct metadata, except for the pathological - //case of an outage of large parts of the Swift cluster or its network, - //since every segment is only written once.) - segment, _, err := c.Object(segmentContainer, segmentName) - switch err { - case nil: - //found new segment -> add it in the correct position and keep - //going, more might be missing - if segmentNumber <= len(segments) { - segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...) - segments[segmentNumber-1] = segment - } else { - segments = append(segments, segment) - } - continue - case ObjectNotFound: - //This segment is missing. Since we upload segments sequentially, - //there won't be any more segments after it. - return segments, nil - default: - return nil, err //unexpected error - } - } -} diff --git a/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/ncw/swift/doc.go deleted file mode 100644 index 44efde7bf..000000000 --- a/vendor/github.com/ncw/swift/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files - -Standard Usage - -Most of the work is done through the Container*() and Object*() methods. - -All methods are safe to use concurrently in multiple go routines. - -Object Versioning - -As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. - -Rackspace Sub Module - -This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. - -*/ -package swift diff --git a/vendor/github.com/ncw/swift/go.mod b/vendor/github.com/ncw/swift/go.mod deleted file mode 100644 index 29f6ee2cb..000000000 --- a/vendor/github.com/ncw/swift/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/ncw/swift diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go deleted file mode 100644 index bec640b00..000000000 --- a/vendor/github.com/ncw/swift/largeobjects.go +++ /dev/null @@ -1,448 +0,0 @@ -package swift - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "encoding/hex" - "errors" - "fmt" - "io" - "os" - gopath "path" - "strconv" - "strings" - "time" -) - -// NotLargeObject is returned if an operation is performed on an object which isn't large. 
-var NotLargeObject = errors.New("Not a large object") - -// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded -var readAfterWriteTimeout = 15 * time.Second - -// readAfterWriteWait defines the time to sleep between two retries -var readAfterWriteWait = 200 * time.Millisecond - -// largeObjectCreateFile represents an open static or dynamic large object -type largeObjectCreateFile struct { - conn *Connection - container string - objectName string - currentLength int64 - filePos int64 - chunkSize int64 - segmentContainer string - prefix string - contentType string - checkHash bool - segments []Object - headers Headers - minChunkSize int64 -} - -func swiftSegmentPath(path string) (string, error) { - checksum := sha1.New() - random := make([]byte, 32) - if _, err := rand.Read(random); err != nil { - return "", err - } - path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) - return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil -} - -func getSegment(segmentPath string, partNumber int) string { - return fmt.Sprintf("%s/%016d", segmentPath, partNumber) -} - -func parseFullPath(manifest string) (container string, prefix string) { - components := strings.SplitN(manifest, "/", 2) - container = components[0] - if len(components) > 1 { - prefix = components[1] - } - return container, prefix -} - -func (headers Headers) IsLargeObjectDLO() bool { - _, isDLO := headers["X-Object-Manifest"] - return isDLO -} - -func (headers Headers) IsLargeObjectSLO() bool { - _, isSLO := headers["X-Static-Large-Object"] - return isSLO -} - -func (headers Headers) IsLargeObject() bool { - return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO() -} - -func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) { - if manifest, isDLO := headers["X-Object-Manifest"]; isDLO { - segmentContainer, segmentPath := parseFullPath(manifest) - segments, err := c.getAllDLOSegments(segmentContainer, segmentPath) - return segmentContainer, segments, err - } - if headers.IsLargeObjectSLO() { - return c.getAllSLOSegments(container, path) - } - return "", nil, NotLargeObject -} - -// LargeObjectOpts describes how a large object should be created -type LargeObjectOpts struct { - Container string // Name of container to place object - ObjectName string // Name of object - Flags int // Creation flags - CheckHash bool // If set Check the hash - Hash string // If set use this hash to check - ContentType string // Content-Type of the object - Headers Headers // Additional headers to upload the object with - ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set - MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info - SegmentContainer string // Name of the container to place segments - SegmentPrefix string // Prefix to use for the segments - NoBuffer bool // Prevents using a bufio.Writer to write segments -} - -type LargeObjectFile interface { - io.Writer - io.Seeker - io.Closer - Size() int64 - Flush() error -} - -// largeObjectCreate creates a large object at opts.Container, opts.ObjectName. 
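The `IsLargeObjectDLO`/`IsLargeObjectSLO` predicates above make the detection headers explicit. A short sketch of classifying an object with them (names are invented; assumes a `*swift.Connection`):

```go
// kindOf reports which of the marker headers, if any, an object carries.
func kindOf(c *swift.Connection, container, object string) (string, error) {
	_, headers, err := c.Object(container, object)
	if err != nil {
		return "", err
	}
	switch {
	case headers.IsLargeObjectDLO():
		return "dynamic large object (X-Object-Manifest set)", nil
	case headers.IsLargeObjectSLO():
		return "static large object (X-Static-Large-Object set)", nil
	default:
		return "regular object", nil
	}
}
```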
-// -// opts.Flags can have the following bits set -// os.TRUNC - remove the contents of the large object if it exists -// os.APPEND - write at the end of the large object -func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) { - var ( - segmentPath string - segmentContainer string - segments []Object - currentLength int64 - err error - ) - - if opts.SegmentPrefix != "" { - segmentPath = opts.SegmentPrefix - } else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil { - return nil, err - } - - if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil { - if opts.Flags&os.O_TRUNC != 0 { - c.LargeObjectDelete(opts.Container, opts.ObjectName) - } else { - currentLength = info.Bytes - if headers.IsLargeObject() { - segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers) - if err != nil { - return nil, err - } - if len(segments) > 0 { - segmentPath = gopath.Dir(segments[0].Name) - } - } else { - if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil { - return nil, err - } - segments = append(segments, info) - } - } - } else if err != ObjectNotFound { - return nil, err - } - - // segmentContainer is not empty when the manifest already existed - if segmentContainer == "" { - if opts.SegmentContainer != "" { - segmentContainer = opts.SegmentContainer - } else { - segmentContainer = opts.Container + "_segments" - } - } - - file := &largeObjectCreateFile{ - conn: c, - checkHash: opts.CheckHash, - container: opts.Container, - objectName: opts.ObjectName, - chunkSize: opts.ChunkSize, - minChunkSize: opts.MinChunkSize, - headers: opts.Headers, - segmentContainer: segmentContainer, - prefix: segmentPath, - segments: segments, - currentLength: currentLength, - } - - if file.chunkSize == 0 { - file.chunkSize = 10 * 1024 * 1024 - } - - if file.minChunkSize > file.chunkSize { - file.chunkSize = file.minChunkSize - } - - if opts.Flags&os.O_APPEND != 0 { - file.filePos = currentLength - } - - return file, nil -} - -// LargeObjectDelete deletes the large object named by container, path -func (c *Connection) LargeObjectDelete(container string, objectName string) error { - _, headers, err := c.Object(container, objectName) - if err != nil { - return err - } - - var objects [][]string - if headers.IsLargeObject() { - segmentContainer, segments, err := c.getAllSegments(container, objectName, headers) - if err != nil { - return err - } - for _, obj := range segments { - objects = append(objects, []string{segmentContainer, obj.Name}) - } - } - objects = append(objects, []string{container, objectName}) - - info, err := c.cachedQueryInfo() - if err == nil && info.SupportsBulkDelete() && len(objects) > 0 { - filenames := make([]string, len(objects)) - for i, obj := range objects { - filenames[i] = obj[0] + "/" + obj[1] - } - _, err = c.doBulkDelete(filenames) - // Don't fail on ObjectNotFound because eventual consistency - // makes this situation normal. - if err != nil && err != Forbidden && err != ObjectNotFound { - return err - } - } else { - for _, obj := range objects { - if err := c.ObjectDelete(obj[0], obj[1]); err != nil { - return err - } - } - } - - return nil -} - -// LargeObjectGetSegments returns all the segments that compose an object -// If the object is a Dynamic Large Object (DLO), it just returns the objects -// that have the prefix as indicated by the manifest. 
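To illustrate the `opts.Flags` semantics documented for `largeObjectCreate` above, here is a hedged sketch of appending to an existing dynamic large object (container, object name, and chunk size are illustrative; assumes `os` and `github.com/ncw/swift` imports):

```go
// appendData writes at the end of an existing DLO, per os.O_APPEND above.
func appendData(c *swift.Connection, data []byte) error {
	out, err := c.DynamicLargeObjectCreateFile(&swift.LargeObjectOpts{
		Container:  "logs",
		ObjectName: "app.log",
		Flags:      os.O_APPEND, // position the write cursor at the current end
		ChunkSize:  10 * 1024 * 1024,
	})
	if err != nil {
		return err
	}
	if _, err := out.Write(data); err != nil {
		out.Close()
		return err
	}
	return out.Close() // Close flushes buffers and rewrites the manifest
}
```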
-// If the object is a Static Large Object (SLO), it retrieves the JSON content -// of the manifest and return all the segments of it. -func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) { - _, headers, err := c.Object(container, path) - if err != nil { - return "", nil, err - } - - return c.getAllSegments(container, path, headers) -} - -// Seek sets the offset for the next write operation -func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) { - switch whence { - case 0: - file.filePos = offset - case 1: - file.filePos += offset - case 2: - file.filePos = file.currentLength + offset - default: - return -1, fmt.Errorf("invalid value for whence") - } - if file.filePos < 0 { - return -1, fmt.Errorf("negative offset") - } - return file.filePos, nil -} - -func (file *largeObjectCreateFile) Size() int64 { - return file.currentLength -} - -func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) { - endTimer := time.NewTimer(readAfterWriteTimeout) - defer endTimer.Stop() - waitingTime := readAfterWriteWait - for { - var headers Headers - var sz int64 - if headers, sz, err = fn(); err == nil { - if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz { - return - } - } else { - return - } - waitTimer := time.NewTimer(waitingTime) - select { - case <-endTimer.C: - waitTimer.Stop() - err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz) - return - case <-waitTimer.C: - waitingTime *= 2 - } - } -} - -func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) { - err = withLORetry(expectedSize, func() (Headers, int64, error) { - var info Object - var headers Headers - info, headers, err = c.objectBase(container, objectName) - if err != nil { - return headers, 0, err - } - return headers, info.Bytes, nil - }) - return -} - -// Write satisfies the io.Writer interface -func (file *largeObjectCreateFile) Write(buf []byte) (int, error) { - var sz int64 - var relativeFilePos int - writeSegmentIdx := 0 - for i, obj := range file.segments { - if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) { - relativeFilePos = int(file.filePos - sz) - break - } - writeSegmentIdx++ - sz += obj.Bytes - } - sizeToWrite := len(buf) - for offset := 0; offset < sizeToWrite; { - newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos) - if err != nil { - return 0, err - } - if writeSegmentIdx < len(file.segments) { - file.segments[writeSegmentIdx] = *newSegment - } else { - file.segments = append(file.segments, *newSegment) - } - offset += n - writeSegmentIdx++ - relativeFilePos = 0 - } - file.filePos += int64(sizeToWrite) - file.currentLength = 0 - for _, obj := range file.segments { - file.currentLength += obj.Bytes - } - return sizeToWrite, nil -} - -func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) { - var ( - readers []io.Reader - existingSegment *Object - segmentSize int - ) - segmentName := getSegment(file.prefix, writeSegmentIdx+1) - sizeToRead := int(file.chunkSize) - if writeSegmentIdx < len(file.segments) { - existingSegment = &file.segments[writeSegmentIdx] - if writeSegmentIdx != len(file.segments)-1 { - sizeToRead = int(existingSegment.Bytes) - } - if relativeFilePos > 0 { - headers := make(Headers) - headers["Range"] = "bytes=0-" 
+ strconv.FormatInt(int64(relativeFilePos-1), 10) - existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) - if err != nil { - return nil, 0, err - } - defer existingSegmentReader.Close() - sizeToRead -= relativeFilePos - segmentSize += relativeFilePos - readers = []io.Reader{existingSegmentReader} - } - } - if sizeToRead > len(buf) { - sizeToRead = len(buf) - } - segmentSize += sizeToRead - readers = append(readers, bytes.NewReader(buf[:sizeToRead])) - if existingSegment != nil && segmentSize < int(existingSegment.Bytes) { - headers := make(Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-" - tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) - if err != nil { - return nil, 0, err - } - defer tailSegmentReader.Close() - segmentSize = int(existingSegment.Bytes) - readers = append(readers, tailSegmentReader) - } - segmentReader := io.MultiReader(readers...) - headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil) - if err != nil { - return nil, 0, err - } - return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil -} - -func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile { - if !opts.NoBuffer { - return &bufferedLargeObjectFile{ - LargeObjectFile: lo, - bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)), - } - } - return lo -} - -type bufferedLargeObjectFile struct { - LargeObjectFile - bw *bufio.Writer -} - -func (blo *bufferedLargeObjectFile) Close() error { - err := blo.bw.Flush() - if err != nil { - return err - } - return blo.LargeObjectFile.Close() -} - -func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) { - return blo.bw.Write(p) -} - -func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) { - err := blo.bw.Flush() - if err != nil { - return 0, err - } - return blo.LargeObjectFile.Seek(offset, whence) -} - -func (blo *bufferedLargeObjectFile) Size() int64 { - return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered()) -} - -func (blo *bufferedLargeObjectFile) Flush() error { - err := blo.bw.Flush() - if err != nil { - return err - } - return blo.LargeObjectFile.Flush() -} diff --git a/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/ncw/swift/meta.go deleted file mode 100644 index 7e149e139..000000000 --- a/vendor/github.com/ncw/swift/meta.go +++ /dev/null @@ -1,174 +0,0 @@ -// Metadata manipulation in and out of Headers - -package swift - -import ( - "fmt" - "net/http" - "strconv" - "strings" - "time" -) - -// Metadata stores account, container or object metadata. -type Metadata map[string]string - -// Metadata gets the Metadata starting with the metaPrefix out of the Headers. -// -// The keys in the Metadata will be converted to lower case -func (h Headers) Metadata(metaPrefix string) Metadata { - m := Metadata{} - metaPrefix = http.CanonicalHeaderKey(metaPrefix) - for key, value := range h { - if strings.HasPrefix(key, metaPrefix) { - metaKey := strings.ToLower(key[len(metaPrefix):]) - m[metaKey] = value - } - } - return m -} - -// AccountMetadata converts Headers from account to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) AccountMetadata() Metadata { - return h.Metadata("X-Account-Meta-") -} - -// ContainerMetadata converts Headers from container to a Metadata. 
-// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ContainerMetadata() Metadata { - return h.Metadata("X-Container-Meta-") -} - -// ObjectMetadata converts Headers from object to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ObjectMetadata() Metadata { - return h.Metadata("X-Object-Meta-") -} - -// Headers convert the Metadata starting with the metaPrefix into a -// Headers. -// -// The keys in the Metadata will be converted from lower case to http -// Canonical (see http.CanonicalHeaderKey). -func (m Metadata) Headers(metaPrefix string) Headers { - h := Headers{} - for key, value := range m { - key = http.CanonicalHeaderKey(metaPrefix + key) - h[key] = value - } - return h -} - -// AccountHeaders converts the Metadata for the account. -func (m Metadata) AccountHeaders() Headers { - return m.Headers("X-Account-Meta-") -} - -// ContainerHeaders converts the Metadata for the container. -func (m Metadata) ContainerHeaders() Headers { - return m.Headers("X-Container-Meta-") -} - -// ObjectHeaders converts the Metadata for the object. -func (m Metadata) ObjectHeaders() Headers { - return m.Headers("X-Object-Meta-") -} - -// Turns a number of ns into a floating point string in seconds -// -// Trims trailing zeros and guaranteed to be perfectly accurate -func nsToFloatString(ns int64) string { - if ns < 0 { - return "-" + nsToFloatString(-ns) - } - result := fmt.Sprintf("%010d", ns) - split := len(result) - 9 - result, decimals := result[:split], result[split:] - decimals = strings.TrimRight(decimals, "0") - if decimals != "" { - result += "." - result += decimals - } - return result -} - -// Turns a floating point string in seconds into a ns integer -// -// Guaranteed to be perfectly accurate -func floatStringToNs(s string) (int64, error) { - const zeros = "000000000" - if point := strings.IndexRune(s, '.'); point >= 0 { - tail := s[point+1:] - if fill := 9 - len(tail); fill < 0 { - tail = tail[:9] - } else { - tail += zeros[:fill] - } - s = s[:point] + tail - } else if len(s) > 0 { // Make sure empty string produces an error - s += zeros - } - return strconv.ParseInt(s, 10, 64) -} - -// FloatStringToTime converts a floating point number string to a time.Time -// -// The string is floating point number of seconds since the epoch -// (Unix time). The number should be in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z" -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. -// -// If an error is returned then time will be returned as the zero time. -func FloatStringToTime(s string) (t time.Time, err error) { - ns, err := floatStringToNs(s) - if err != nil { - return - } - t = time.Unix(0, ns) - return -} - -// TimeToFloatString converts a time.Time object to a floating point string -// -// The string is floating point number of seconds since the epoch -// (Unix time). The number is in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped -// from the output. -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. 
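A small sketch of the round trip just described, combining `Headers.Metadata` with the float-seconds codec; the header value is the doc comment's own example (assumes `fmt` and `github.com/ncw/swift` imports):

```go
func mtimeExample() {
	h := swift.Headers{"X-Object-Meta-Mtime": "1354040105.123456789"}
	m := h.Metadata("X-Object-Meta-") // keys lower-cased, prefix stripped
	t, err := swift.FloatStringToTime(m["mtime"])
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())                    // 2012-11-27 18:15:05.123456789 +0000 UTC
	fmt.Println(swift.TimeToFloatString(t)) // "1354040105.123456789" (lossless)
}
```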
-func TimeToFloatString(t time.Time) string { - return nsToFloatString(t.UnixNano()) -} - -// GetModTime reads a modification time (mtime) from a Metadata object -// -// This is a defacto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. -// -// If an error is returned then time will be returned as the zero time. -func (m Metadata) GetModTime() (t time.Time, err error) { - return FloatStringToTime(m["mtime"]) -} - -// SetModTime writes an modification time (mtime) to a Metadata object -// -// This is a defacto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. -func (m Metadata) SetModTime(t time.Time) { - m["mtime"] = TimeToFloatString(t) -} diff --git a/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/ncw/swift/notes.txt deleted file mode 100644 index f738552cd..000000000 --- a/vendor/github.com/ncw/swift/notes.txt +++ /dev/null @@ -1,55 +0,0 @@ -Notes on Go Swift -================= - -Make a builder style interface like the Google Go APIs? Advantages -are that it is easy to add named methods to the service object to do -specific things. Slightly less efficient. Not sure about how to -return extra stuff though - in an object? - -Make a container struct so these could be methods on it? - -Make noResponse check for 204? - -Make storage public so it can be extended easily? - -Rename to go-swift to match user agent string? - -Reconnect on auth error - 401 when token expires isn't tested - -Make more api compatible with python cloudfiles? - -Retry operations on timeout / network errors? -- also 408 error -- GET requests only? - -Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock - -Add extra headers field to Connection (for via etc) - -Make errors use an error heirachy then can catch them with a type assertion - - Error(...) - ObjectCorrupted{ Error } - -Make a Debug flag in connection for logging stuff - -Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc - -Object range - -Object create, update with X-Delete-At or X-Delete-After - -Large object support -- check uploads are less than 5GB in normal mode? - -Access control CORS? - -Swift client retries and backs off for all types of errors - -Implement net error interface? - -type Error interface { - error - Timeout() bool // Is the error a timeout? - Temporary() bool // Is the error temporary? 
-} diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go deleted file mode 100644 index 6a10ddfc0..000000000 --- a/vendor/github.com/ncw/swift/slo.go +++ /dev/null @@ -1,171 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" -) - -// StaticLargeObjectCreateFile represents an open static large object -type StaticLargeObjectCreateFile struct { - largeObjectCreateFile -} - -var SLONotSupported = errors.New("SLO not supported") - -type swiftSegment struct { - Path string `json:"path,omitempty"` - Etag string `json:"etag,omitempty"` - Size int64 `json:"size_bytes,omitempty"` - // When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes` - // but when querying the JSON content of a manifest with the `multipart-manifest=get` - // parameter, Swift names those attributes `name`, `hash` and `bytes`. - // We use all the different attributes names in this structure to be able to use - // the same structure for both uploading and retrieving. - Name string `json:"name,omitempty"` - Hash string `json:"hash,omitempty"` - Bytes int64 `json:"bytes,omitempty"` - ContentType string `json:"content_type,omitempty"` - LastModified string `json:"last_modified,omitempty"` -} - -// StaticLargeObjectCreateFile creates a static large object returning -// an object which satisfies io.Writer, io.Seeker, io.Closer and -// io.ReaderFrom. The flags are as passed to the largeObjectCreate -// method. -func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { - info, err := c.cachedQueryInfo() - if err != nil || !info.SupportsSLO() { - return nil, SLONotSupported - } - realMinChunkSize := info.SLOMinSegmentSize() - if realMinChunkSize > opts.MinChunkSize { - opts.MinChunkSize = realMinChunkSize - } - lo, err := c.largeObjectCreate(opts) - if err != nil { - return nil, err - } - return withBuffer(opts, &StaticLargeObjectCreateFile{ - largeObjectCreateFile: *lo, - }), nil -} - -// StaticLargeObjectCreate creates or truncates an existing static -// large object returning a writeable object. This sets opts.Flags to -// an appropriate value before calling StaticLargeObjectCreateFile -func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { - opts.Flags = os.O_TRUNC | os.O_CREATE - return c.StaticLargeObjectCreateFile(opts) -} - -// StaticLargeObjectDelete deletes a static large object and all of its segments. 
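The comment on `swiftSegment` above explains its two JSON spellings. As a sketch of the upload form (this uses the package's unexported type, so it would only compile inside package `swift`; all values are invented):

```go
// Build the JSON body for PUT <container>/<object>?multipart-manifest=put.
// Querying the manifest back with multipart-manifest=get would return the
// name/hash/bytes spellings instead.
func exampleManifest() ([]byte, error) {
	return json.Marshal([]swiftSegment{{
		Path: "photos_segments/big/0000000000000001",
		Etag: "d41d8cd98f00b204e9800998ecf8427e",
		Size: 1048576,
	}})
}
```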
-func (c *Connection) StaticLargeObjectDelete(container string, path string) error { - info, err := c.cachedQueryInfo() - if err != nil || !info.SupportsSLO() { - return SLONotSupported - } - return c.LargeObjectDelete(container, path) -} - -// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName -func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { - swiftInfo, err := c.cachedQueryInfo() - if err != nil || !swiftInfo.SupportsSLO() { - return SLONotSupported - } - info, headers, err := c.Object(srcContainer, srcObjectName) - if err != nil { - return err - } - - container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers) - if err != nil { - return err - } - - //copy only metadata during move (other headers might not be safe for copying) - headers = headers.ObjectMetadata().ObjectHeaders() - - if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil { - return err - } - - if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { - return err - } - - return nil -} - -// createSLOManifest creates a static large object manifest -func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error { - sloSegments := make([]swiftSegment, len(segments)) - for i, segment := range segments { - sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name) - sloSegments[i].Etag = segment.Hash - sloSegments[i].Size = segment.Bytes - } - - content, err := json.Marshal(sloSegments) - if err != nil { - return err - } - - values := url.Values{} - values.Set("multipart-manifest", "put") - if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil { - return err - } - - return nil -} - -func (file *StaticLargeObjectCreateFile) Close() error { - return file.Flush() -} - -func (file *StaticLargeObjectCreateFile) Flush() error { - if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil { - return err - } - return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) -} - -func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) { - var ( - segmentList []swiftSegment - segments []Object - segPath string - segmentContainer string - ) - - values := url.Values{} - values.Set("multipart-manifest", "get") - - file, _, err := c.objectOpen(container, path, true, nil, values) - if err != nil { - return "", nil, err - } - - content, err := ioutil.ReadAll(file) - if err != nil { - return "", nil, err - } - - json.Unmarshal(content, &segmentList) - for _, segment := range segmentList { - segmentContainer, segPath = parseFullPath(segment.Name[1:]) - segments = append(segments, Object{ - Name: segPath, - Bytes: segment.Bytes, - Hash: segment.Hash, - }) - } - - return segmentContainer, segments, nil -} diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go deleted file mode 100644 index 72ede2493..000000000 --- a/vendor/github.com/ncw/swift/swift.go +++ /dev/null @@ -1,2243 +0,0 @@ -package swift - -import ( - "bufio" - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "io/ioutil" - "mime" - 
"net/http" - "net/url" - "os" - "path" - "strconv" - "strings" - "sync" - "time" -) - -const ( - DefaultUserAgent = "goswift/1.0" // Default user agent - DefaultRetries = 3 // Default number of retries on token expiry - TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC - UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). - UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). - UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). - allContainersLimit = 10000 // Number of containers to fetch at once - allObjectsLimit = 10000 // Number objects to fetch at once - allObjectsChanLimit = 1000 // ...when fetching to a channel -) - -// ObjectType is the type of the swift object, regular, static large, -// or dynamic large. -type ObjectType int - -// Values that ObjectType can take -const ( - RegularObjectType ObjectType = iota - StaticLargeObjectType - DynamicLargeObjectType -) - -// Connection holds the details of the connection to the swift server. -// -// You need to provide UserName, ApiKey and AuthUrl when you create a -// connection then call Authenticate on it. -// -// The auth version in use will be detected from the AuthURL - you can -// override this with the AuthVersion parameter. -// -// If using v2 auth you can also set Region in the Connection -// structure. If you don't set Region you will get the default region -// which may not be what you want. -// -// For reference some common AuthUrls looks like this: -// -// Rackspace US https://auth.api.rackspacecloud.com/v1.0 -// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 -// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 -// Memset Memstore UK https://auth.storage.memset.com/v1.0 -// Memstore v2 https://auth.storage.memset.com/v2.0 -// -// When using Google Appengine you must provide the Connection with an -// appengine-specific Transport: -// -// import ( -// "appengine/urlfetch" -// "fmt" -// "github.com/ncw/swift" -// ) -// -// func handler(w http.ResponseWriter, r *http.Request) { -// ctx := appengine.NewContext(r) -// tr := urlfetch.Transport{Context: ctx} -// c := swift.Connection{ -// UserName: "user", -// ApiKey: "key", -// AuthUrl: "auth_url", -// Transport: tr, -// } -// _ := c.Authenticate() -// containers, _ := c.ContainerNames(nil) -// fmt.Fprintf(w, "containers: %q", containers) -// } -// -// If you don't supply a Transport, one is made which relies on -// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). -// This means that the connection will respect the HTTP proxy specified by the -// environment variables $HTTP_PROXY and $NO_PROXY. 
-type Connection struct { - // Parameters - fill these in before calling Authenticate - // They are all optional except UserName, ApiKey and AuthUrl - Domain string // User's domain name - DomainId string // User's domain Id - UserName string // UserName for api - UserId string // User Id - ApiKey string // Key for api access - ApplicationCredentialId string // Application Credential ID - ApplicationCredentialName string // Application Credential Name - ApplicationCredentialSecret string // Application Credential Secret - AuthUrl string // Auth URL - Retries int // Retries on error (default is 3) - UserAgent string // Http User agent (default goswift/1.0) - ConnectTimeout time.Duration // Connect channel timeout (default 10s) - Timeout time.Duration // Data channel timeout (default 60s) - Region string // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only) - AuthVersion int // Set to 1, 2 or 3 or leave at 0 for autodetect - Internal bool // Set this to true to use the the internal / service network - Tenant string // Name of the tenant (v2,v3 auth only) - TenantId string // Id of the tenant (v2,v3 auth only) - EndpointType EndpointType // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set) - TenantDomain string // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain - TenantDomainId string // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain - TrustId string // Id of the trust (v3 auth only) - Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) - // These are filled in after Authenticate is called as are the defaults for above - StorageUrl string - AuthToken string - Expires time.Time // time the token expires, may be Zero if unknown - client *http.Client - Auth Authenticator `json:"-" xml:"-"` // the current authenticator - authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth - // swiftInfo is filled after QueryInfo is called - swiftInfo SwiftInfo -} - -// setFromEnv reads the value that param points to (it must be a -// pointer), if it isn't the zero value then it reads the environment -// variable name passed in, parses it according to the type and writes -// it to the pointer. -func setFromEnv(param interface{}, name string) (err error) { - val := os.Getenv(name) - if val == "" { - return - } - switch result := param.(type) { - case *string: - if *result == "" { - *result = val - } - case *int: - if *result == 0 { - *result, err = strconv.Atoi(val) - } - case *bool: - if *result == false { - *result, err = strconv.ParseBool(val) - } - case *time.Duration: - if *result == 0 { - *result, err = time.ParseDuration(val) - } - case *EndpointType: - if *result == EndpointType("") { - *result = EndpointType(val) - } - default: - return newErrorf(0, "can't set var of type %T", param) - } - return err -} - -// ApplyEnvironment reads environment variables and applies them to -// the Connection structure. It won't overwrite any parameters which -// are already set in the Connection struct. -// -// To make a new Connection object entirely from the environment you -// would do: -// -// c := new(Connection) -// err := c.ApplyEnvironment() -// if err != nil { log.Fatal(err) } -// -// The naming of these variables follows the official Openstack naming -// scheme so it should be compatible with OpenStack rc files. 
-//
-// For v1 authentication (obsolete)
-//	ST_AUTH - Auth URL
-//	ST_USER - UserName for api
-//	ST_KEY  - Key for api access
-//
-// For v2 authentication
-//	OS_AUTH_URL    - Auth URL
-//	OS_USERNAME    - UserName for api
-//	OS_PASSWORD    - Key for api access
-//	OS_TENANT_NAME - Name of the tenant
-//	OS_TENANT_ID   - Id of the tenant
-//	OS_REGION_NAME - Region to use - default is use first region
-//
-// For v3 authentication
-//	OS_AUTH_URL                      - Auth URL
-//	OS_USERNAME                      - UserName for api
-//	OS_USER_ID                       - User Id
-//	OS_PASSWORD                      - Key for api access
-//	OS_APPLICATION_CREDENTIAL_ID     - Application Credential ID
-//	OS_APPLICATION_CREDENTIAL_NAME   - Application Credential Name
-//	OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
-//	OS_USER_DOMAIN_NAME              - User's domain name
-//	OS_USER_DOMAIN_ID                - User's domain Id
-//	OS_PROJECT_NAME                  - Name of the project
-//	OS_PROJECT_DOMAIN_NAME           - Name of the tenant's domain, only needed if it differs from the user domain
-//	OS_PROJECT_DOMAIN_ID             - Id of the tenant's domain, only needed if it differs from the user domain
-//	OS_TRUST_ID                      - Id of the trust
-//	OS_REGION_NAME                   - Region to use - default is use first region
-//
-// Other
-//	OS_ENDPOINT_TYPE - Endpoint type public, internal or admin
-//	ST_AUTH_VERSION  - Choose auth version - 1, 2 or 3 or leave at 0 for autodetect
-//
-// For manual authentication
-//	OS_STORAGE_URL - storage URL from alternate authentication
-//	OS_AUTH_TOKEN  - Auth Token from alternate authentication
-//
-// Library specific
-//	GOSWIFT_RETRIES         - Retries on error (default is 3)
-//	GOSWIFT_USER_AGENT      - HTTP User agent (default goswift/1.0)
-//	GOSWIFT_CONNECT_TIMEOUT - Connect channel timeout with unit, eg "10s", "100ms" (default "10s")
-//	GOSWIFT_TIMEOUT         - Data channel timeout with unit, eg "10s", "100ms" (default "60s")
-//	GOSWIFT_INTERNAL        - Set this to "true" to use the internal network (obsolete - use OS_ENDPOINT_TYPE)
-func (c *Connection) ApplyEnvironment() (err error) {
-	for _, item := range []struct {
-		result interface{}
-		name   string
-	}{
-		// Environment variables - keep in same order as Connection
-		{&c.Domain, "OS_USER_DOMAIN_NAME"},
-		{&c.DomainId, "OS_USER_DOMAIN_ID"},
-		{&c.UserName, "OS_USERNAME"},
-		{&c.UserId, "OS_USER_ID"},
-		{&c.ApiKey, "OS_PASSWORD"},
-		{&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
-		{&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
-		{&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
-		{&c.AuthUrl, "OS_AUTH_URL"},
-		{&c.Retries, "GOSWIFT_RETRIES"},
-		{&c.UserAgent, "GOSWIFT_USER_AGENT"},
-		{&c.ConnectTimeout, "GOSWIFT_CONNECT_TIMEOUT"},
-		{&c.Timeout, "GOSWIFT_TIMEOUT"},
-		{&c.Region, "OS_REGION_NAME"},
-		{&c.AuthVersion, "ST_AUTH_VERSION"},
-		{&c.Internal, "GOSWIFT_INTERNAL"},
-		{&c.Tenant, "OS_TENANT_NAME"},  // v2
-		{&c.Tenant, "OS_PROJECT_NAME"}, // v3
-		{&c.TenantId, "OS_TENANT_ID"},
-		{&c.EndpointType, "OS_ENDPOINT_TYPE"},
-		{&c.TenantDomain, "OS_PROJECT_DOMAIN_NAME"},
-		{&c.TenantDomainId, "OS_PROJECT_DOMAIN_ID"},
-		{&c.TrustId, "OS_TRUST_ID"},
-		{&c.StorageUrl, "OS_STORAGE_URL"},
-		{&c.AuthToken, "OS_AUTH_TOKEN"},
-		// v1 auth alternatives
-		{&c.ApiKey, "ST_KEY"},
-		{&c.UserName, "ST_USER"},
-		{&c.AuthUrl, "ST_AUTH"},
-	} {
-		err = setFromEnv(item.result, item.name)
-		if err != nil {
-			return newErrorf(0, "failed to read env var %q: %v", item.name, err)
-		}
-	}
-	return nil
-}
-
-// Error - all errors generated by this package are of this type. Other errors
-// may be passed on from library functions though.
-type Error struct {
-	StatusCode int // HTTP status code if relevant or 0 if not
-	Text       string
-}
-
-// Error satisfies the error interface.
-func (e *Error) Error() string {
-	return e.Text
-}
-
-// newError makes a new error from a string.
-func newError(StatusCode int, Text string) *Error {
-	return &Error{
-		StatusCode: StatusCode,
-		Text:       Text,
-	}
-}
-
-// newErrorf makes a new error from sprintf parameters.
-func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error {
-	return newError(StatusCode, fmt.Sprintf(Text, Parameters...))
-}
-
-// errorMap defines http error codes to error mappings.
-type errorMap map[int]error
-
-var (
-	// Specific Errors you might want to check for equality
-	NotModified         = newError(304, "Not Modified")
-	BadRequest          = newError(400, "Bad Request")
-	AuthorizationFailed = newError(401, "Authorization Failed")
-	ContainerNotFound   = newError(404, "Container Not Found")
-	ContainerNotEmpty   = newError(409, "Container Not Empty")
-	ObjectNotFound      = newError(404, "Object Not Found")
-	ObjectCorrupted     = newError(422, "Object Corrupted")
-	TimeoutError        = newError(408, "Timeout when reading or writing data")
-	Forbidden           = newError(403, "Operation forbidden")
-	TooLargeObject      = newError(413, "Too Large Object")
-	RateLimit           = newError(498, "Rate Limit")
-	TooManyRequests     = newError(429, "TooManyRequests")
-
-	// Mappings for authentication errors
-	authErrorMap = errorMap{
-		400: BadRequest,
-		401: AuthorizationFailed,
-		403: Forbidden,
-	}
-
-	// Mappings for container errors
-	ContainerErrorMap = errorMap{
-		400: BadRequest,
-		403: Forbidden,
-		404: ContainerNotFound,
-		409: ContainerNotEmpty,
-		498: RateLimit,
-	}
-
-	// Mappings for object errors
-	objectErrorMap = errorMap{
-		304: NotModified,
-		400: BadRequest,
-		403: Forbidden,
-		404: ObjectNotFound,
-		413: TooLargeObject,
-		422: ObjectCorrupted,
-		429: TooManyRequests,
-		498: RateLimit,
-	}
-)
-
-// checkClose is used to check the return from Close in a defer
-// statement.
-func checkClose(c io.Closer, err *error) {
-	cerr := c.Close()
-	if *err == nil {
-		*err = cerr
-	}
-}
-
-// drainAndClose discards all data from rd and closes it.
-// If an error occurs during Read, it is discarded.
-func drainAndClose(rd io.ReadCloser, err *error) {
-	if rd == nil {
-		return
-	}
-
-	_, _ = io.Copy(ioutil.Discard, rd)
-	cerr := rd.Close()
-	if err != nil && *err == nil {
-		*err = cerr
-	}
-}
-
-// parseHeaders checks a response for errors and translates into
-// standard errors if necessary. If an error is returned, resp.Body
-// has been drained and closed.
-func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error {
-	if errorMap != nil {
-		if err, ok := errorMap[resp.StatusCode]; ok {
-			drainAndClose(resp.Body, nil)
-			return err
-		}
-	}
-	if resp.StatusCode < 200 || resp.StatusCode > 299 {
-		drainAndClose(resp.Body, nil)
-		return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status)
-	}
-	return nil
-}
-
-// readHeaders returns a Headers object from the http.Response.
-//
-// If it receives multiple values for a key (which should never
-// happen) it will use the first one
-func readHeaders(resp *http.Response) Headers {
-	headers := Headers{}
-	for key, values := range resp.Header {
-		headers[key] = values[0]
-	}
-	return headers
-}
-
-// Headers stores HTTP headers (can only have one of each header like Swift).
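
The sentinel errors above are plain values that callers compare by equality, as the "check for equality" comment says; the package predates errors.Is. A hedged sketch, reusing the Connection c from the first example and hypothetical container and object names (ObjectDelete is defined later in this file):

    err := c.ObjectDelete("my-container", "missing.txt")
    switch err {
    case nil:
        fmt.Println("deleted")
    case swift.ObjectNotFound:
        fmt.Println("object was already gone") // mapped from a 404 via objectErrorMap
    case swift.ContainerNotFound:
        fmt.Println("no such container")
    default:
        log.Fatal(err)
    }
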
-type Headers map[string]string - -// Does an http request using the running timer passed in -func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { - // Do the request in the background so we can check the timeout - type result struct { - resp *http.Response - err error - } - done := make(chan result, 1) - go func() { - resp, err := c.client.Do(req) - done <- result{resp, err} - }() - // Wait for the read or the timeout - select { - case r := <-done: - return r.resp, r.err - case <-timer.C: - // Kill the connection on timeout so we don't leak sockets or goroutines - cancelRequest(c.Transport, req) - return nil, TimeoutError - } - panic("unreachable") // For Go 1.0 -} - -// Set defaults for any unset values -// -// Call with authLock held -func (c *Connection) setDefaults() { - if c.UserAgent == "" { - c.UserAgent = DefaultUserAgent - } - if c.Retries == 0 { - c.Retries = DefaultRetries - } - if c.ConnectTimeout == 0 { - c.ConnectTimeout = 10 * time.Second - } - if c.Timeout == 0 { - c.Timeout = 60 * time.Second - } - if c.Transport == nil { - t := &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - // Half of linux's default open files limit (1024). - MaxIdleConnsPerHost: 512, - } - SetExpectContinueTimeout(t, 5*time.Second) - c.Transport = t - } - if c.client == nil { - c.client = &http.Client{ - // CheckRedirect: redirectPolicyFunc, - Transport: c.Transport, - } - } -} - -// Authenticate connects to the Swift server. -// -// If you don't call it before calling one of the connection methods -// then it will be called for you on the first access. -func (c *Connection) Authenticate() (err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticate() -} - -// Internal implementation of Authenticate -// -// Call with authLock held -func (c *Connection) authenticate() (err error) { - c.setDefaults() - - // Flush the keepalives connection - if we are - // re-authenticating then stuff has gone wrong - flushKeepaliveConnections(c.Transport) - - if c.Auth == nil { - c.Auth, err = newAuth(c) - if err != nil { - return - } - } - - retries := 1 -again: - var req *http.Request - req, err = c.Auth.Request(c) - if err != nil { - return - } - if req != nil { - timer := time.NewTimer(c.ConnectTimeout) - defer timer.Stop() - var resp *http.Response - resp, err = c.doTimeoutRequest(timer, req) - if err != nil { - return - } - defer func() { - drainAndClose(resp.Body, &err) - // Flush the auth connection - we don't want to keep - // it open if keepalives were enabled - flushKeepaliveConnections(c.Transport) - }() - if err = c.parseHeaders(resp, authErrorMap); err != nil { - // Try again for a limited number of times on - // AuthorizationFailed or BadRequest. 
This allows us - // to try some alternate forms of the request - if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { - retries-- - goto again - } - return - } - err = c.Auth.Response(resp) - if err != nil { - return - } - } - if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" { - c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType) - } else { - c.StorageUrl = c.Auth.StorageUrl(c.Internal) - } - c.AuthToken = c.Auth.Token() - if do, ok := c.Auth.(Expireser); ok { - c.Expires = do.Expires() - } else { - c.Expires = time.Time{} - } - - if !c.authenticated() { - err = newError(0, "Response didn't have storage url and auth token") - return - } - return -} - -// Get an authToken and url -// -// The Url may be updated if it needed to authenticate using the OnReAuth function -func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - targetUrlOut = targetUrlIn - if !c.authenticated() { - err = c.authenticate() - if err != nil { - return - } - if OnReAuth != nil { - targetUrlOut, err = OnReAuth() - if err != nil { - return - } - } - } - authToken = c.AuthToken - return -} - -// flushKeepaliveConnections is called to flush pending requests after an error. -func flushKeepaliveConnections(transport http.RoundTripper) { - if tr, ok := transport.(interface { - CloseIdleConnections() - }); ok { - tr.CloseIdleConnections() - } -} - -// UnAuthenticate removes the authentication from the Connection. -func (c *Connection) UnAuthenticate() { - c.authLock.Lock() - c.StorageUrl = "" - c.AuthToken = "" - c.authLock.Unlock() -} - -// Authenticated returns a boolean to show if the current connection -// is authenticated. -// -// Doesn't actually check the credentials against the server. -func (c *Connection) Authenticated() bool { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticated() -} - -// Internal version of Authenticated() -// -// Call with authLock held -func (c *Connection) authenticated() bool { - if c.StorageUrl == "" || c.AuthToken == "" { - return false - } - if c.Expires.IsZero() { - return true - } - timeUntilExpiry := c.Expires.Sub(time.Now()) - return timeUntilExpiry >= 60*time.Second -} - -// SwiftInfo contains the JSON object returned by Swift when the /info -// route is queried. 
The object contains, among others, the Swift version,
-// the enabled middlewares and their configuration
-type SwiftInfo map[string]interface{}
-
-func (i SwiftInfo) SupportsBulkDelete() bool {
-	_, val := i["bulk_delete"] // presence of the key signals support
-	return val
-}
-
-func (i SwiftInfo) SupportsSLO() bool {
-	_, val := i["slo"]
-	return val
-}
-
-func (i SwiftInfo) SLOMinSegmentSize() int64 {
-	if slo, ok := i["slo"].(map[string]interface{}); ok {
-		val, _ := slo["min_segment_size"].(float64)
-		return int64(val)
-	}
-	return 1
-}
-
-// Discover Swift configuration by doing a request against /info
-func (c *Connection) QueryInfo() (infos SwiftInfo, err error) {
-	infoUrl, err := url.Parse(c.StorageUrl)
-	if err != nil {
-		return nil, err
-	}
-	infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info")
-	resp, err := c.client.Get(infoUrl.String())
-	if err == nil {
-		if resp.StatusCode != http.StatusOK {
-			drainAndClose(resp.Body, nil)
-			return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode)
-		}
-		err = readJson(resp, &infos)
-		if err == nil {
-			c.authLock.Lock()
-			c.swiftInfo = infos
-			c.authLock.Unlock()
-		}
-		return infos, err
-	}
-	return nil, err
-}
-
-func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) {
-	c.authLock.Lock()
-	infos = c.swiftInfo
-	c.authLock.Unlock()
-	if infos == nil {
-		infos, err = c.QueryInfo()
-		if err != nil {
-			return
-		}
-	}
-	return infos, nil
-}
-
-// RequestOpts contains parameters for Connection.storage.
-type RequestOpts struct {
-	Container  string
-	ObjectName string
-	Operation  string
-	Parameters url.Values
-	Headers    Headers
-	ErrorMap   errorMap
-	NoResponse bool
-	Body       io.Reader
-	Retries    int
-	// if set this is called on re-authentication to refresh the targetUrl
-	OnReAuth func() (string, error)
-}
-
-// Call runs a remote command on the targetUrl, returns a
-// response, headers and possible error.
-//
-// operation is GET, HEAD etc
-// container is the name of a container
-// Any other parameters (if not nil) are added to the targetUrl
-//
-// Returns a response or an error. If response is returned then
-// the resp.Body must be read completely and
-// resp.Body.Close() must be called on it, unless noResponse is set in
-// which case the body will be closed in this function
-//
-// If "Content-Length" is set in p.Headers it will be used - this can
-// be used to override the default chunked transfer encoding for
-// uploads.
-//
-// This will Authenticate if necessary, and re-authenticate if it
-// receives a 401 error which means the token has expired
-//
-// This method is exported so extensions can call it.
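
QueryInfo and the SwiftInfo helpers above let a caller probe optional server features before relying on them. A small sketch, reusing the Connection c from the first example:

    info, err := c.QueryInfo()
    if err != nil {
        log.Fatal(err)
    }
    if info.SupportsBulkDelete() {
        fmt.Println("bulk delete middleware enabled")
    }
    if info.SupportsSLO() {
        fmt.Println("SLO enabled, min segment size:", info.SLOMinSegmentSize())
    }
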
-func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) {
-	c.authLock.Lock()
-	c.setDefaults()
-	c.authLock.Unlock()
-	retries := p.Retries
-	if retries == 0 {
-		retries = c.Retries
-	}
-	var req *http.Request
-	for {
-		var authToken string
-		if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil {
-			return // authentication failure
-		}
-		var URL *url.URL
-		URL, err = url.Parse(targetUrl)
-		if err != nil {
-			return
-		}
-		if p.Container != "" {
-			URL.Path += "/" + p.Container
-			if p.ObjectName != "" {
-				URL.Path += "/" + p.ObjectName
-			}
-		}
-		if p.Parameters != nil {
-			URL.RawQuery = p.Parameters.Encode()
-		}
-		timer := time.NewTimer(c.ConnectTimeout)
-		defer timer.Stop()
-		reader := p.Body
-		if reader != nil {
-			reader = newWatchdogReader(reader, c.Timeout, timer)
-		}
-		req, err = http.NewRequest(p.Operation, URL.String(), reader)
-		if err != nil {
-			return
-		}
-		if p.Headers != nil {
-			for k, v := range p.Headers {
-				// Set ContentLength in req if the user passed it in in the headers
-				if k == "Content-Length" {
-					req.ContentLength, err = strconv.ParseInt(v, 10, 64)
-					if err != nil {
-						err = fmt.Errorf("Invalid %q header %q: %v", k, v, err)
-						return
-					}
-				} else {
-					req.Header.Add(k, v)
-				}
-			}
-		}
-		req.Header.Add("User-Agent", c.UserAgent)
-		req.Header.Add("X-Auth-Token", authToken)
-
-		_, hasCL := p.Headers["Content-Length"]
-		AddExpectAndTransferEncoding(req, hasCL)
-
-		resp, err = c.doTimeoutRequest(timer, req)
-		if err != nil {
-			if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 {
-				retries--
-				continue
-			}
-			return
-		}
-		// Check to see if token has expired
-		if resp.StatusCode == 401 && retries > 0 {
-			drainAndClose(resp.Body, nil)
-			c.UnAuthenticate()
-			retries--
-		} else {
-			break
-		}
-	}
-
-	headers = readHeaders(resp)
-	if err = c.parseHeaders(resp, p.ErrorMap); err != nil {
-		return
-	}
-	if p.NoResponse {
-		drainAndClose(resp.Body, &err)
-		if err != nil {
-			return
-		}
-	} else {
-		// Cancel the request on timeout
-		cancel := func() {
-			cancelRequest(c.Transport, req)
-		}
-		// Wrap resp.Body to make it obey an idle timeout
-		resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel)
-	}
-	return
-}
-
-// storage runs a remote command on the storage url, returns a
-// response, headers and possible error.
-//
-// operation is GET, HEAD etc
-// container is the name of a container
-// Any other parameters (if not nil) are added to the storage url
-//
-// Returns a response or an error. If response is returned then
-// resp.Body.Close() must be called on it, unless noResponse is set in
-// which case the body will be closed in this function
-//
-// This will Authenticate if necessary, and re-authenticate if it
-// receives a 401 error which means the token has expired
-func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) {
-	p.OnReAuth = func() (string, error) {
-		return c.StorageUrl, nil
-	}
-	c.authLock.Lock()
-	url := c.StorageUrl
-	c.authLock.Unlock()
-	return c.Call(url, p)
-}
-
-// readLines reads the response into an array of strings.
-// -// Closes the response when done -func readLines(resp *http.Response) (lines []string, err error) { - defer drainAndClose(resp.Body, &err) - reader := bufio.NewReader(resp.Body) - buffer := bytes.NewBuffer(make([]byte, 0, 128)) - var part []byte - var prefix bool - for { - if part, prefix, err = reader.ReadLine(); err != nil { - break - } - buffer.Write(part) - if !prefix { - lines = append(lines, buffer.String()) - buffer.Reset() - } - } - if err == io.EOF { - err = nil - } - return -} - -// readJson reads the response into the json type passed in -// -// Closes the response when done -func readJson(resp *http.Response, result interface{}) (err error) { - defer drainAndClose(resp.Body, &err) - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(result) -} - -/* ------------------------------------------------------------ */ - -// ContainersOpts is options for Containers() and ContainerNames() -type ContainersOpts struct { - Limit int // For an integer value n, limits the number of results to at most n values. - Prefix string // Given a string value x, return container names matching the specified prefix. - Marker string // Given a string value x, return container names greater in value than the specified marker. - EndMarker string // Given a string value x, return container names less in value than the specified marker. - Headers Headers // Any additional HTTP headers - can be nil -} - -// parse the ContainerOpts -func (opts *ContainersOpts) parse() (url.Values, Headers) { - v := url.Values{} - var h Headers - if opts != nil { - if opts.Limit > 0 { - v.Set("limit", strconv.Itoa(opts.Limit)) - } - if opts.Prefix != "" { - v.Set("prefix", opts.Prefix) - } - if opts.Marker != "" { - v.Set("marker", opts.Marker) - } - if opts.EndMarker != "" { - v.Set("end_marker", opts.EndMarker) - } - h = opts.Headers - } - return v, h -} - -// ContainerNames returns a slice of names of containers in this account. -func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { - v, h := opts.parse() - resp, _, err := c.storage(RequestOpts{ - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - lines, err := readLines(resp) - return lines, err -} - -// Container contains information about a container -type Container struct { - Name string // Name of the container - Count int64 // Number of objects in the container - Bytes int64 // Total number of bytes used in the container -} - -// Containers returns a slice of structures with full information as -// described in Container. 
-func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) {
-	v, h := opts.parse()
-	v.Set("format", "json")
-	resp, _, err := c.storage(RequestOpts{
-		Operation:  "GET",
-		Parameters: v,
-		ErrorMap:   ContainerErrorMap,
-		Headers:    h,
-	})
-	if err != nil {
-		return nil, err
-	}
-	var containers []Container
-	err = readJson(resp, &containers)
-	return containers, err
-}
-
-// containersAllOpts makes a copy of opts if set or makes a new one and
-// overrides Limit and Marker
-func containersAllOpts(opts *ContainersOpts) *ContainersOpts {
-	var newOpts ContainersOpts
-	if opts != nil {
-		newOpts = *opts
-	}
-	if newOpts.Limit == 0 {
-		newOpts.Limit = allContainersLimit
-	}
-	newOpts.Marker = ""
-	return &newOpts
-}
-
-// ContainersAll is like Containers but it returns all the Containers
-//
-// It calls Containers multiple times using the Marker parameter
-//
-// It has a default Limit parameter but you may pass in your own
-func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) {
-	opts = containersAllOpts(opts)
-	containers := make([]Container, 0)
-	for {
-		newContainers, err := c.Containers(opts)
-		if err != nil {
-			return nil, err
-		}
-		containers = append(containers, newContainers...)
-		if len(newContainers) < opts.Limit {
-			break
-		}
-		opts.Marker = newContainers[len(newContainers)-1].Name
-	}
-	return containers, nil
-}
-
-// ContainerNamesAll is like ContainerNames but it returns all the Containers
-//
-// It calls ContainerNames multiple times using the Marker parameter
-//
-// It has a default Limit parameter but you may pass in your own
-func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) {
-	opts = containersAllOpts(opts)
-	containers := make([]string, 0)
-	for {
-		newContainers, err := c.ContainerNames(opts)
-		if err != nil {
-			return nil, err
-		}
-		containers = append(containers, newContainers...)
-		if len(newContainers) < opts.Limit {
-			break
-		}
-		opts.Marker = newContainers[len(newContainers)-1]
-	}
-	return containers, nil
-}
-
-/* ------------------------------------------------------------ */
-
-// ObjectsOpts is options for Objects() and ObjectNames()
-type ObjectsOpts struct {
-	Limit     int     // For an integer value n, limits the number of results to at most n values.
-	Marker    string  // Given a string value x, return object names greater in value than the specified marker.
-	EndMarker string  // Given a string value x, return object names less in value than the specified marker
-	Prefix    string  // For a string value x, causes the results to be limited to object names beginning with the substring x.
-	Path      string  // For a string value x, return the object names nested in the pseudo path
-	Delimiter rune    // For a character c, return all the object names nested in the container
-	Headers   Headers // Any additional HTTP headers - can be nil
-}
-
-// parse reads values out of ObjectsOpts
-func (opts *ObjectsOpts) parse() (url.Values, Headers) {
-	v := url.Values{}
-	var h Headers
-	if opts != nil {
-		if opts.Limit > 0 {
-			v.Set("limit", strconv.Itoa(opts.Limit))
-		}
-		if opts.Marker != "" {
-			v.Set("marker", opts.Marker)
-		}
-		if opts.EndMarker != "" {
-			v.Set("end_marker", opts.EndMarker)
-		}
-		if opts.Prefix != "" {
-			v.Set("prefix", opts.Prefix)
-		}
-		if opts.Path != "" {
-			v.Set("path", opts.Path)
-		}
-		if opts.Delimiter != 0 {
-			v.Set("delimiter", string(opts.Delimiter))
-		}
-		h = opts.Headers
-	}
-	return v, h
-}
-
-// ObjectNames returns a slice of names of objects in a given container.
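
ContainersAll and ContainerNamesAll above both page through results with the Marker/Limit protocol: fetch a page, and if it is full, resume after the last name seen. A sketch of the same loop written by hand, reusing c - useful when each page should be processed before the next is fetched:

    opts := &swift.ContainersOpts{Limit: 100}
    for {
        page, err := c.Containers(opts)
        if err != nil {
            log.Fatal(err)
        }
        for _, container := range page {
            fmt.Println(container.Name, container.Count, container.Bytes)
        }
        if len(page) < opts.Limit {
            break // a short page means the listing is complete
        }
        opts.Marker = page[len(page)-1].Name // resume after the last name seen
    }
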
-func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) {
-	v, h := opts.parse()
-	resp, _, err := c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "GET",
-		Parameters: v,
-		ErrorMap:   ContainerErrorMap,
-		Headers:    h,
-	})
-	if err != nil {
-		return nil, err
-	}
-	return readLines(resp)
-}
-
-// Object contains information about an object
-type Object struct {
-	Name               string    `json:"name"`          // object name
-	ContentType        string    `json:"content_type"`  // eg application/directory
-	Bytes              int64     `json:"bytes"`         // size in bytes
-	ServerLastModified string    `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server
-	LastModified       time.Time // Last modified time converted to a time.Time
-	Hash               string    `json:"hash"`     // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
-	SLOHash            string    `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
-	PseudoDirectory    bool      // Set when using delimiter to show that this directory object does not really exist
-	SubDir             string    `json:"subdir"` // returned only when using delimiter to mark "pseudo directories"
-	ObjectType         ObjectType // type of this object
-}
-
-// Objects returns a slice of Object with information about each
-// object in the container.
-//
-// If Delimiter is set in the opts then PseudoDirectory may be set,
-// with ContentType 'application/directory'. These are not real
-// objects but represent directories of objects which haven't had an
-// object created for them.
-func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) {
-	v, h := opts.parse()
-	v.Set("format", "json")
-	resp, _, err := c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "GET",
-		Parameters: v,
-		ErrorMap:   ContainerErrorMap,
-		Headers:    h,
-	})
-	if err != nil {
-		return nil, err
-	}
-	var objects []Object
-	err = readJson(resp, &objects)
-	// Convert Pseudo directories and dates
-	for i := range objects {
-		object := &objects[i]
-		if object.SubDir != "" {
-			object.Name = object.SubDir
-			object.PseudoDirectory = true
-			object.ContentType = "application/directory"
-		}
-		if object.ServerLastModified != "" {
-			// 2012-11-11T14:49:47.887250
-			//
-			// Remove fractional seconds if present. This
-			// then keeps it consistent with Object
-			// which can only return timestamps accurate
-			// to 1 second
-			//
-			// The TimeFormat will parse fractional
-			// seconds if desired though
-			datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0]
-			object.LastModified, err = time.Parse(TimeFormat, datetime)
-			if err != nil {
-				return nil, err
-			}
-		}
-		if object.SLOHash != "" {
-			object.ObjectType = StaticLargeObjectType
-		}
-	}
-	return objects, err
-}
-
-// objectsAllOpts makes a copy of opts if set or makes a new one and
-// overrides Limit and Marker
-func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts {
-	var newOpts ObjectsOpts
-	if opts != nil {
-		newOpts = *opts
-	}
-	if newOpts.Limit == 0 {
-		newOpts.Limit = Limit
-	}
-	newOpts.Marker = ""
-	return &newOpts
-}
-
-// ObjectsWalkFn is a closure defined by the caller to iterate through
-// all objects
-//
-// Call Objects or ObjectNames from here with the *ObjectsOpts passed in
-//
-// Do whatever is required with the results then return them
-type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error)
-
-// ObjectsWalk is used to iterate through all the objects in chunks as
-// returned by Objects or ObjectNames using the Marker and Limit
-// parameters in the ObjectsOpts.
-// -// Pass in a closure `walkFn` which calls Objects or ObjectNames with -// the *ObjectsOpts passed to it and does something with the results. -// -// Errors will be returned from this function -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error { - opts = objectsAllOpts(opts, allObjectsChanLimit) - for { - objects, err := walkFn(opts) - if err != nil { - return err - } - var n int - var last string - switch objects := objects.(type) { - case []string: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1] - } - case []Object: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1].Name - } - default: - panic("Unknown type returned to ObjectsWalk") - } - if n < opts.Limit { - break - } - opts.Marker = last - } - return nil -} - -// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice -// -// It calls Objects multiple times using the Marker parameter -func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) { - objects := make([]Object, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.Objects(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// ObjectNamesAll is like ObjectNames but it returns all the Objects -// -// It calls ObjectNames multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) { - objects := make([]string, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// Account contains information about this account. -type Account struct { - BytesUsed int64 // total number of bytes used - Containers int64 // total number of containers - Objects int64 // total number of objects -} - -// getInt64FromHeader is a helper function to decode int64 from header. -func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { - value := resp.Header.Get(header) - result, err = strconv.ParseInt(value, 10, 64) - if err != nil { - err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) - } - return -} - -// Account returns info about the account in an Account struct. 
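
ObjectsWalk above drives the same Marker/Limit pagination through a caller-supplied closure; ObjectNamesAll is exactly this pattern. A hedged sketch with a hypothetical container name, reusing c:

    err := c.ObjectsWalk("my-container", nil, func(opts *swift.ObjectsOpts) (interface{}, error) {
        names, err := c.ObjectNames("my-container", opts)
        if err == nil {
            for _, name := range names {
                fmt.Println(name)
            }
        }
        // Return the slice so ObjectsWalk can advance opts.Marker.
        return names, err
    })
    if err != nil {
        log.Fatal(err)
    }
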
-func (c *Connection) Account() (info Account, headers Headers, err error) {
-	var resp *http.Response
-	resp, headers, err = c.storage(RequestOpts{
-		Operation:  "HEAD",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-	})
-	if err != nil {
-		return
-	}
-	// Parse the headers into a dict
-	//
-	//    {'Accept-Ranges': 'bytes',
-	//     'Content-Length': '0',
-	//     'Date': 'Tue, 05 Jul 2011 16:37:06 GMT',
-	//     'X-Account-Bytes-Used': '316598182',
-	//     'X-Account-Container-Count': '4',
-	//     'X-Account-Object-Count': '1433'}
-	if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil {
-		return
-	}
-	if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil {
-		return
-	}
-	if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil {
-		return
-	}
-	return
-}
-
-// AccountUpdate adds, replaces or removes account metadata.
-//
-// Add or update keys by mentioning them in the Headers.
-//
-// Remove keys by setting them to an empty string.
-func (c *Connection) AccountUpdate(h Headers) error {
-	_, _, err := c.storage(RequestOpts{
-		Operation:  "POST",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-		Headers:    h,
-	})
-	return err
-}
-
-// ContainerCreate creates a container.
-//
-// If you don't want to add Headers just pass in nil
-//
-// No error is returned if it already exists but the metadata, if any, will be updated.
-func (c *Connection) ContainerCreate(container string, h Headers) error {
-	_, _, err := c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "PUT",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-		Headers:    h,
-	})
-	return err
-}
-
-// ContainerDelete deletes a container.
-//
-// May return ContainerNotFound or ContainerNotEmpty
-func (c *Connection) ContainerDelete(container string) error {
-	_, _, err := c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "DELETE",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-	})
-	return err
-}
-
-// Container returns info about a single container including any
-// metadata in the headers.
-func (c *Connection) Container(container string) (info Container, headers Headers, err error) {
-	var resp *http.Response
-	resp, headers, err = c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "HEAD",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-	})
-	if err != nil {
-		return
-	}
-	// Parse the headers into the struct
-	info.Name = container
-	if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil {
-		return
-	}
-	if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil {
-		return
-	}
-	return
-}
-
-// ContainerUpdate adds, replaces or removes container metadata.
-//
-// Add or update keys by mentioning them in the Metadata.
-//
-// Remove keys by setting them to an empty string.
-//
-// Container metadata can only be read with Container() not with Containers().
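
ContainerUpdate, defined next, follows the add/replace/remove convention just described. A sketch using the standard Swift X-Container-Meta-* metadata prefix and hypothetical names, reusing c:

    // Add or update a metadata key...
    err := c.ContainerUpdate("my-container", swift.Headers{"X-Container-Meta-Owner": "backup-team"})
    if err != nil {
        log.Fatal(err)
    }
    // ...then remove it again by setting it to the empty string.
    err = c.ContainerUpdate("my-container", swift.Headers{"X-Container-Meta-Owner": ""})
    if err != nil {
        log.Fatal(err)
    }
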
-func (c *Connection) ContainerUpdate(container string, h Headers) error {
-	_, _, err := c.storage(RequestOpts{
-		Container:  container,
-		Operation:  "POST",
-		ErrorMap:   ContainerErrorMap,
-		NoResponse: true,
-		Headers:    h,
-	})
-	return err
-}
-
-// ------------------------------------------------------------
-
-// ObjectCreateFile represents a swift object open for writing
-type ObjectCreateFile struct {
-	checkHash  bool           // whether we are checking the hash
-	pipeReader *io.PipeReader // pipe for the caller to use
-	pipeWriter *io.PipeWriter
-	hash       hash.Hash      // hash being built up as we go along
-	done       chan struct{}  // signals when the upload has finished
-	resp       *http.Response // valid when done has signalled
-	err        error          // ditto
-	headers    Headers        // ditto
-}
-
-// Write bytes to the object - see io.Writer
-func (file *ObjectCreateFile) Write(p []byte) (n int, err error) {
-	n, err = file.pipeWriter.Write(p)
-	if err == io.ErrClosedPipe {
-		if file.err != nil {
-			return 0, file.err
-		}
-		return 0, newError(500, "Write on closed file")
-	}
-	if err == nil && file.checkHash {
-		_, _ = file.hash.Write(p)
-	}
-	return
-}
-
-// Close closes the object and checks the md5sum if it was required.
-//
-// Also returns any other errors from the server (eg container not
-// found) so it is very important to check the errors on this method.
-func (file *ObjectCreateFile) Close() error {
-	// Close the body
-	err := file.pipeWriter.Close()
-	if err != nil {
-		return err
-	}
-
-	// Wait for the HTTP operation to complete
-	<-file.done
-
-	// Check errors
-	if file.err != nil {
-		return file.err
-	}
-	if file.checkHash {
-		receivedMd5 := strings.ToLower(file.headers["Etag"])
-		calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil))
-		if receivedMd5 != calculatedMd5 {
-			return ObjectCorrupted
-		}
-	}
-	return nil
-}
-
-// Headers returns the response headers from the created object if the upload
-// has been completed. The Close() method must be called on an ObjectCreateFile
-// before this method.
-func (file *ObjectCreateFile) Headers() (Headers, error) {
-	// error out if upload is not complete.
-	select {
-	case <-file.done:
-	default:
-		return nil, fmt.Errorf("Cannot get metadata, object upload failed or has not yet completed.")
-	}
-	return file.headers, nil
-}
-
-// Check it satisfies the interface
-var _ io.WriteCloser = &ObjectCreateFile{}
-
-// objectPutHeaders creates a set of headers for a PUT
-//
-// It guesses the contentType from the objectName if it isn't set
-//
-// checkHash may be changed
-func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers {
-	if contentType == "" {
-		contentType = mime.TypeByExtension(path.Ext(objectName))
-		if contentType == "" {
-			contentType = "application/octet-stream"
-		}
-	}
-	// Meta stuff
-	extraHeaders := map[string]string{
-		"Content-Type": contentType,
-	}
-	for key, value := range h {
-		extraHeaders[key] = value
-	}
-	if Hash != "" {
-		extraHeaders["Etag"] = Hash
-		*checkHash = false // the server will do it
-	}
-	return extraHeaders
-}
-
-// ObjectCreate creates or updates the object in the container. It
-// returns an io.WriteCloser you should write the contents to. You
-// MUST call Close() on it and you MUST check the error return from
-// Close().
-//
-// If checkHash is true then it will calculate the MD5 Hash of the
-// file as it is being uploaded and check it against that returned
-// from the server. If it is wrong then it will return
-// ObjectCorrupted on Close()
-//
-// If you know the MD5 hash of the object ahead of time then set the
-// Hash parameter and it will be sent to the server (as an Etag
-// header) and the server will check the MD5 itself after the upload,
-// and this will return ObjectCorrupted on Close() if it is incorrect.
-//
-// If you don't want any error protection (not recommended) then set
-// checkHash to false and Hash to "".
-//
-// If contentType is set it will be used, otherwise one will be
-// guessed from objectName using mime.TypeByExtension
-func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) {
-	extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
-	pipeReader, pipeWriter := io.Pipe()
-	file = &ObjectCreateFile{
-		hash:       md5.New(),
-		checkHash:  checkHash,
-		pipeReader: pipeReader,
-		pipeWriter: pipeWriter,
-		done:       make(chan struct{}),
-	}
-	// Run the PUT in the background piping it data
-	go func() {
-		opts := RequestOpts{
-			Container:  container,
-			ObjectName: objectName,
-			Operation:  "PUT",
-			Headers:    extraHeaders,
-			Body:       pipeReader,
-			NoResponse: true,
-			ErrorMap:   objectErrorMap,
-		}
-		file.resp, file.headers, file.err = c.storage(opts)
-		// Signal finished
-		pipeReader.Close()
-		close(file.done)
-	}()
-	return
-}
-
-func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) {
-	extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
-	hash := md5.New()
-	var body io.Reader = contents
-	if checkHash {
-		body = io.TeeReader(contents, hash)
-	}
-	_, headers, err = c.storage(RequestOpts{
-		Container:  container,
-		ObjectName: objectName,
-		Operation:  "PUT",
-		Headers:    extraHeaders,
-		Body:       body,
-		NoResponse: true,
-		ErrorMap:   objectErrorMap,
-		Parameters: parameters,
-	})
-	if err != nil {
-		return
-	}
-	if checkHash {
-		receivedMd5 := strings.ToLower(headers["Etag"])
-		calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil))
-		if receivedMd5 != calculatedMd5 {
-			err = ObjectCorrupted
-			return
-		}
-	}
-	return
-}
-
-// ObjectPut creates or updates the path in the container from
-// contents. contents should be an open io.Reader which will have all
-// its contents read.
-//
-// This is a low level interface.
-//
-// If checkHash is true then it will calculate the MD5 Hash of the
-// file as it is being uploaded and check it against that returned
-// from the server. If it is wrong then it will return
-// ObjectCorrupted.
-//
-// If you know the MD5 hash of the object ahead of time then set the
-// Hash parameter and it will be sent to the server (as an Etag
-// header) and the server will check the MD5 itself after the upload,
-// and this will return ObjectCorrupted if it is incorrect.
-//
-// If you don't want any error protection (not recommended) then set
-// checkHash to false and Hash to "".
-//
-// If contentType is set it will be used, otherwise one will be
-// guessed from objectName using mime.TypeByExtension
-func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) {
-	return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil)
-}
-
-// ObjectPutBytes creates an object from a []byte in a container.
-// -// This is a simplified interface which checks the MD5. -func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { - buf := bytes.NewBuffer(contents) - h := Headers{"Content-Length": strconv.Itoa(len(contents))} - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) - return -} - -// ObjectPutString creates an object from a string in a container. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { - buf := strings.NewReader(contents) - h := Headers{"Content-Length": strconv.Itoa(len(contents))} - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) - return -} - -// ObjectOpenFile represents a swift object open for reading -type ObjectOpenFile struct { - connection *Connection // stored copy of Connection used in Open - container string // stored copy of container used in Open - objectName string // stored copy of objectName used in Open - headers Headers // stored copy of headers used in Open - resp *http.Response // http connection - body io.Reader // read data from this - checkHash bool // true if checking MD5 - hash hash.Hash // currently accumulating MD5 - bytes int64 // number of bytes read on this connection - eof bool // whether we have read end of file - pos int64 // current position when reading - lengthOk bool // whether length is valid - length int64 // length of the object if read - seeked bool // whether we have seeked this file or not - overSeeked bool // set if we have seeked to the end or beyond -} - -// Read bytes from the object - see io.Reader -func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { - if file.overSeeked { - return 0, io.EOF - } - n, err = file.body.Read(p) - file.bytes += int64(n) - file.pos += int64(n) - if err == io.EOF { - file.eof = true - } - return -} - -// Seek sets the offset for the next Read to offset, interpreted -// according to whence: 0 means relative to the origin of the file, 1 -// means relative to the current offset, and 2 means relative to the -// end. Seek returns the new offset and an Error, if any. -// -// Seek uses HTTP Range headers which, if the file pointer is moved, -// will involve reopening the HTTP connection. -// -// Note that you can't seek to the end of a file or beyond; HTTP Range -// requests don't support the file pointer being outside the data, -// unlike os.File -// -// Seek(0, 1) will return the current file pointer. -func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { - file.overSeeked = false - switch whence { - case 0: // relative to start - newPos = offset - case 1: // relative to current - newPos = file.pos + offset - case 2: // relative to end - if !file.lengthOk { - return file.pos, newError(0, "Length of file unknown so can't seek from end") - } - newPos = file.length + offset - if offset >= 0 { - file.overSeeked = true - return - } - default: - panic("Unknown whence in ObjectOpenFile.Seek") - } - // If at correct position (quite likely), do nothing - if newPos == file.pos { - return - } - // Close the file... 
- file.seeked = true - err = file.Close() - if err != nil { - return - } - // ...and re-open with a Range header - if file.headers == nil { - file.headers = Headers{} - } - if newPos > 0 { - file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) - } else { - delete(file.headers, "Range") - } - newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) - if err != nil { - return - } - // Update the file - file.resp = newFile.resp - file.body = newFile.body - file.checkHash = false - file.pos = newPos - return -} - -// Length gets the objects content length either from a cached copy or -// from the server. -func (file *ObjectOpenFile) Length() (int64, error) { - if !file.lengthOk { - info, _, err := file.connection.Object(file.container, file.objectName) - file.length = info.Bytes - file.lengthOk = (err == nil) - return file.length, err - } - return file.length, nil -} - -// Close the object and checks the length and md5sum if it was -// required and all the object was read -func (file *ObjectOpenFile) Close() (err error) { - // Close the body at the end - defer checkClose(file.resp.Body, &err) - - // If not end of file or seeked then can't check anything - if !file.eof || file.seeked { - return - } - - // Check the MD5 sum if requested - if file.checkHash { - receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) - calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - - // Check to see we read the correct number of bytes - if file.lengthOk && file.length != file.bytes { - err = ObjectCorrupted - return - } - return -} - -// Check it satisfies the interfaces -var _ io.ReadCloser = &ObjectOpenFile{} -var _ io.Seeker = &ObjectOpenFile{} - -func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { - var resp *http.Response - opts := RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "GET", - ErrorMap: objectErrorMap, - Headers: h, - Parameters: parameters, - } - resp, headers, err = c.storage(opts) - if err != nil { - return - } - // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set - if checkHash && headers.IsLargeObject() { - // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) - checkHash = false - } - file = &ObjectOpenFile{ - connection: c, - container: container, - objectName: objectName, - headers: h, - resp: resp, - checkHash: checkHash, - body: resp.Body, - } - if checkHash { - file.hash = md5.New() - file.body = io.TeeReader(resp.Body, file.hash) - } - // Read Content-Length - if resp.Header.Get("Content-Length") != "" { - file.length, err = getInt64FromHeader(resp, "Content-Length") - file.lengthOk = (err == nil) - } - return -} - -func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { - err = withLORetry(0, func() (Headers, int64, error) { - file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters) - if err != nil { - return headers, 0, err - } - return headers, file.length, nil - }) - return -} - -// ObjectOpen returns an ObjectOpenFile for reading the contents of -// the object. This satisfies the io.ReadCloser and the io.Seeker -// interfaces. 
-// -// You must call Close() on contents when finished -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. It -// will also check the length returned. No checking will be done if -// you don't read all the contents. -// -// Note that objects with X-Object-Manifest or X-Static-Large-Object -// set won't ever have their md5sum's checked as the md5sum reported -// on the object is actually the md5sum of the md5sums of the -// parts. This isn't very helpful to detect a corrupted download as -// the size of the parts aren't known without doing more operations. -// If you want to ensure integrity of an object with a manifest then -// you will need to download everything in the manifest separately. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { - return c.objectOpen(container, objectName, checkHash, h, nil) -} - -// ObjectGet gets the object into the io.Writer contents. -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { - file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) - if err != nil { - return - } - defer checkClose(file, &err) - _, err = io.Copy(contents, file) - return -} - -// ObjectGetBytes returns an object as a []byte. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.Bytes() - return -} - -// ObjectGetString returns an object as a string. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.String() - return -} - -// ObjectDelete deletes the object. 
-//
-// May return ObjectNotFound if the object isn't found
-func (c *Connection) ObjectDelete(container string, objectName string) error {
-	_, _, err := c.storage(RequestOpts{
-		Container:  container,
-		ObjectName: objectName,
-		Operation:  "DELETE",
-		ErrorMap:   objectErrorMap,
-	})
-	return err
-}
-
-// ObjectTempUrl returns a temporary URL for an object
-func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string {
-	mac := hmac.New(sha1.New, []byte(secretKey))
-	prefix, _ := url.Parse(c.StorageUrl)
-	body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName)
-	mac.Write([]byte(body))
-	sig := hex.EncodeToString(mac.Sum(nil))
-	return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix())
-}
-
-// parseResponseStatus parses a string like "200 OK" and returns Error.
-//
-// For status codes between 200 and 299, this returns nil.
-func parseResponseStatus(resp string, errorMap errorMap) error {
-	code := 0
-	reason := resp
-	t := strings.SplitN(resp, " ", 2)
-	if len(t) == 2 {
-		ncode, err := strconv.Atoi(t[0])
-		if err == nil {
-			code = ncode
-			reason = t[1]
-		}
-	}
-	if errorMap != nil {
-		if err, ok := errorMap[code]; ok {
-			return err
-		}
-	}
-	if 200 <= code && code <= 299 {
-		return nil
-	}
-	return newError(code, reason)
-}
-
-// BulkDeleteResult stores results of BulkDelete().
-//
-// Individual errors may (or may not) be returned by Errors.
-// Errors is a map whose keys are a full path of where the object was
-// to be deleted, and whose values are Error objects. A full path of
-// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH".
-type BulkDeleteResult struct {
-	NumberNotFound int64            // # of objects not found.
-	NumberDeleted  int64            // # of deleted objects.
-	Errors         map[string]error // Mapping between object name and an error.
-	Headers        Headers          // Response HTTP headers.
-}
-
-func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) {
-	var buffer bytes.Buffer
-	for _, s := range objects {
-		u := url.URL{Path: s}
-		buffer.WriteString(u.String() + "\n")
-	}
-	resp, headers, err := c.storage(RequestOpts{
-		Operation:  "DELETE",
-		Parameters: url.Values{"bulk-delete": []string{"1"}},
-		Headers: Headers{
-			"Accept":         "application/json",
-			"Content-Type":   "text/plain",
-			"Content-Length": strconv.Itoa(buffer.Len()),
-		},
-		ErrorMap: ContainerErrorMap,
-		Body:     &buffer,
-	})
-	if err != nil {
-		return
-	}
-	var jsonResult struct {
-		NotFound int64  `json:"Number Not Found"`
-		Status   string `json:"Response Status"`
-		Errors   [][]string
-		Deleted  int64 `json:"Number Deleted"`
-	}
-	err = readJson(resp, &jsonResult)
-	if err != nil {
-		return
-	}
-
-	err = parseResponseStatus(jsonResult.Status, objectErrorMap)
-	result.NumberNotFound = jsonResult.NotFound
-	result.NumberDeleted = jsonResult.Deleted
-	result.Headers = headers
-	el := make(map[string]error, len(jsonResult.Errors))
-	for _, t := range jsonResult.Errors {
-		if len(t) != 2 {
-			continue
-		}
-		el[t[0]] = parseResponseStatus(t[1], objectErrorMap)
-	}
-	result.Errors = el
-	return
-}
-
-// BulkDelete deletes multiple objectNames from container in one operation.
-//
-// Some servers may not accept bulk-delete requests since bulk-delete is
-// an optional feature of swift - these will return the Forbidden error.
-// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html -func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { - if len(objectNames) == 0 { - result.Errors = make(map[string]error) - return - } - fullPaths := make([]string, len(objectNames)) - for i, name := range objectNames { - fullPaths[i] = fmt.Sprintf("/%s/%s", container, name) - } - return c.doBulkDelete(fullPaths) -} - -// BulkUploadResult stores results of BulkUpload(). -// -// Individual errors may (or may not) be returned by Errors. -// Errors is a map whose keys are a full path of where an object was -// to be created, and whose values are Error objects. A full path of -// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". -type BulkUploadResult struct { - NumberCreated int64 // # of created objects. - Errors map[string]error // Mapping between object name and an error. - Headers Headers // Response HTTP headers. -} - -// BulkUpload uploads multiple files in one operation. -// -// uploadPath can be empty, a container name, or a pseudo-directory -// within a container. If uploadPath is empty, new containers may be -// automatically created. -// -// Files are read from dataStream. The format of the stream is specified -// by the format parameter. Available formats are: -// * UploadTar - Plain tar stream. -// * UploadTarGzip - Gzip compressed tar stream. -// * UploadTarBzip2 - Bzip2 compressed tar stream. -// -// Some servers may not accept bulk-upload requests since bulk-upload is -// an optional feature of swift - these will return the Forbidden error. -// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html -func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { - extraHeaders := Headers{"Accept": "application/json"} - for key, value := range h { - extraHeaders[key] = value - } - // The following code abuses Container parameter intentionally. - // The best fix might be to rename Container to UploadPath. - resp, headers, err := c.storage(RequestOpts{ - Container: uploadPath, - Operation: "PUT", - Parameters: url.Values{"extract-archive": []string{format}}, - Headers: extraHeaders, - ErrorMap: ContainerErrorMap, - Body: dataStream, - }) - if err != nil { - return - } - // Detect old servers which don't support this feature - if headers["Content-Type"] != "application/json" { - err = Forbidden - return - } - var jsonResult struct { - Created int64 `json:"Number Files Created"` - Status string `json:"Response Status"` - Errors [][]string - } - err = readJson(resp, &jsonResult) - if err != nil { - return - } - - err = parseResponseStatus(jsonResult.Status, objectErrorMap) - result.NumberCreated = jsonResult.Created - result.Headers = headers - el := make(map[string]error, len(jsonResult.Errors)) - for _, t := range jsonResult.Errors { - if len(t) != 2 { - continue - } - el[t[0]] = parseResponseStatus(t[1], objectErrorMap) - } - result.Errors = el - return -} - -// Object returns info about a single object including any metadata in the header. -// -// May return ObjectNotFound. -// -// Use headers.ObjectMetadata() to read the metadata in the Headers. 
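
Since bulk delete is optional middleware, a caller typically falls back on Forbidden, as the BulkDelete docs above note. A sketch with hypothetical names, reusing c:

    result, err := c.BulkDelete("my-container", []string{"a.txt", "b.txt"})
    switch {
    case err == swift.Forbidden:
        log.Println("server lacks bulk middleware; fall back to per-object ObjectDelete")
    case err != nil:
        log.Fatal(err)
    default:
        fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
        for path, perr := range result.Errors {
            fmt.Println("failed:", path, perr)
        }
    }
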
-func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) {
-	err = withLORetry(0, func() (Headers, int64, error) {
-		info, headers, err = c.objectBase(container, objectName)
-		if err != nil {
-			return headers, 0, err
-		}
-		return headers, info.Bytes, nil
-	})
-	return
-}
-
-func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) {
-	var resp *http.Response
-	resp, headers, err = c.storage(RequestOpts{
-		Container:  container,
-		ObjectName: objectName,
-		Operation:  "HEAD",
-		ErrorMap:   objectErrorMap,
-		NoResponse: true,
-	})
-	if err != nil {
-		return
-	}
-	// Parse the headers into the struct
-	//
-	//    HTTP/1.1 200 OK
-	//    Date: Thu, 07 Jun 2010 20:59:39 GMT
-	//    Server: Apache
-	//    Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT
-	//    ETag: 8a964ee2a5e88be344f36c22562a6486
-	//    Content-Length: 512000
-	//    Content-Type: text/plain; charset=UTF-8
-	//    X-Object-Meta-Meat: Bacon
-	//    X-Object-Meta-Fruit: Bacon
-	//    X-Object-Meta-Veggie: Bacon
-	//    X-Object-Meta-Dairy: Bacon
-	info.Name = objectName
-	info.ContentType = resp.Header.Get("Content-Type")
-	if resp.Header.Get("Content-Length") != "" {
-		if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil {
-			return
-		}
-	}
-	// Currently ceph doesn't return a Last-Modified header for DLO manifests without any segments
-	// See ceph http://tracker.ceph.com/issues/15812
-	if resp.Header.Get("Last-Modified") != "" {
-		info.ServerLastModified = resp.Header.Get("Last-Modified")
-		if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
-			return
-		}
-	}
-
-	info.Hash = resp.Header.Get("Etag")
-	if resp.Header.Get("X-Object-Manifest") != "" {
-		info.ObjectType = DynamicLargeObjectType
-	} else if resp.Header.Get("X-Static-Large-Object") != "" {
-		info.ObjectType = StaticLargeObjectType
-	}
-
-	return
-}
-
-// ObjectUpdate adds, replaces or removes object metadata.
-//
-// Add or Update keys by mentioning them in the Metadata. Use
-// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your
-// Metadata to and from normal HTTP headers.
-//
-// This removes all metadata previously added to the object and
-// replaces it with that passed in so to delete keys, just don't
-// mention them in the headers you pass in.
-//
-// Object metadata can only be read with Object() not with Objects().
-//
-// This can also be used to set headers not already assigned such as
-// X-Delete-At or X-Delete-After for expiring objects.
-//
-// You cannot use this to change any of the object's other headers
-// such as Content-Type, ETag, etc.
-//
-// Refer to copying an object when you need to update metadata or
-// other headers such as Content-Type or CORS headers.
-//
-// May return ObjectNotFound.
-func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error {
-	_, _, err := c.storage(RequestOpts{
-		Container:  container,
-		ObjectName: objectName,
-		Operation:  "POST",
-		ErrorMap:   objectErrorMap,
-		NoResponse: true,
-		Headers:    h,
-	})
-	return err
-}
-
-// urlPathEscape escapes the URL path in the string using URL escaping rules
-//
-// This mimics url.PathEscape which is only available from Go 1.8
-func urlPathEscape(in string) string {
-	var u url.URL
-	u.Path = in
-	return u.String()
-}
-
-// ObjectCopy does a server side copy of an object to a new position
-//
-// All metadata is preserved. If metadata is set in the headers then
-// it overrides the old metadata on the copied object.
-//
-// The destination container must exist before the copy.
-//
-// You can use this to copy an object to itself - this is the only way
-// to update the content type of an object.
-func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) {
-	// Build the destination header plus any extra headers passed in
-	extraHeaders := map[string]string{
-		"Destination": urlPathEscape(dstContainer + "/" + dstObjectName),
-	}
-	for key, value := range h {
-		extraHeaders[key] = value
-	}
-	_, headers, err = c.storage(RequestOpts{
-		Container:  srcContainer,
-		ObjectName: srcObjectName,
-		Operation:  "COPY",
-		ErrorMap:   objectErrorMap,
-		NoResponse: true,
-		Headers:    extraHeaders,
-	})
-	return
-}
-
-// ObjectMove does a server side move of an object to a new position
-//
-// This is a convenience method which calls ObjectCopy then ObjectDelete
-//
-// All metadata is preserved.
-//
-// The destination container must exist before the copy.
-func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) {
-	_, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil)
-	if err != nil {
-		return
-	}
-	return c.ObjectDelete(srcContainer, srcObjectName)
-}
-
-// ObjectUpdateContentType updates the content type of an object
-//
-// This is a convenience method which calls ObjectCopy
-//
-// All other metadata is preserved.
-func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) {
-	h := Headers{"Content-Type": contentType}
-	_, err = c.ObjectCopy(container, objectName, container, objectName, h)
-	return
-}
-
-// ------------------------------------------------------------
-
-// VersionContainerCreate is a helper method for creating and enabling version controlled containers.
-//
-// It builds the current object container, the non-current object version container, and enables versioning.
-//
-// If the server doesn't support versioning then it will return
-// Forbidden; however, it will have created both containers by that point.
-func (c *Connection) VersionContainerCreate(current, version string) error {
-	if err := c.ContainerCreate(version, nil); err != nil {
-		return err
-	}
-	if err := c.ContainerCreate(current, nil); err != nil {
-		return err
-	}
-	if err := c.VersionEnable(current, version); err != nil {
-		return err
-	}
-	return nil
-}
-
-// VersionEnable enables versioning on the current container with version as the tracking container.
-//
-// May return Forbidden if this isn't supported by the server
-func (c *Connection) VersionEnable(current, version string) error {
-	h := Headers{"X-Versions-Location": version}
-	if err := c.ContainerUpdate(current, h); err != nil {
-		return err
-	}
-	// Check to see if the header was set properly
-	_, headers, err := c.Container(current)
-	if err != nil {
-		return err
-	}
-	// If setting the versions header failed, return Forbidden as the server doesn't support this
-	if headers["X-Versions-Location"] != version {
-		return Forbidden
-	}
-	return nil
-}
-
-// VersionDisable disables versioning on the current container.
-func (c *Connection) VersionDisable(current string) error {
-	h := Headers{"X-Versions-Location": ""}
-	if err := c.ContainerUpdate(current, h); err != nil {
-		return err
-	}
-	return nil
-}
-
-// VersionObjectList returns a list of older versions of the object.
-//
-// Objects are returned in the format <length><object_name>/<timestamp>
-func (c *Connection) VersionObjectList(version, object string) ([]string, error) {
-	opts := &ObjectsOpts{
-		// <3-character zero-padded hexadecimal character length><object_name>/
-		Prefix: fmt.Sprintf("%03x", len(object)) + object + "/",
-	}
-	return c.ObjectNames(version, opts)
-}
diff --git a/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/ncw/swift/timeout_reader.go
deleted file mode 100644
index 88ae73328..000000000
--- a/vendor/github.com/ncw/swift/timeout_reader.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package swift
-
-import (
-	"io"
-	"time"
-)
-
-// An io.ReadCloser which obeys an idle timeout
-type timeoutReader struct {
-	reader  io.ReadCloser
-	timeout time.Duration
-	cancel  func()
-}
-
-// Returns a wrapper around the reader which obeys an idle
-// timeout. The cancel function is called if the timeout happens
-func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
-	return &timeoutReader{
-		reader:  reader,
-		timeout: timeout,
-		cancel:  cancel,
-	}
-}
-
-// Read reads up to len(p) bytes into p
-//
-// Waits at most timeout for the read to complete, otherwise it returns TimeoutError
-func (t *timeoutReader) Read(p []byte) (int, error) {
-	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
-	// Do the read in the background
-	type result struct {
-		n   int
-		err error
-	}
-	done := make(chan result, 1)
-	go func() {
-		n, err := t.reader.Read(p)
-		done <- result{n, err}
-	}()
-	// Wait for the read or the timeout
-	timer := time.NewTimer(t.timeout)
-	defer timer.Stop()
-	select {
-	case r := <-done:
-		return r.n, r.err
-	case <-timer.C:
-		t.cancel()
-		return 0, TimeoutError
-	}
-	panic("unreachable") // for Go 1.0
-}
-
-// Close closes the wrapped reader
-func (t *timeoutReader) Close() error {
-	return t.reader.Close()
-}
-
-// Check it satisfies the interface
-var _ io.ReadCloser = &timeoutReader{}
diff --git a/vendor/github.com/ncw/swift/travis_realserver.sh b/vendor/github.com/ncw/swift/travis_realserver.sh
deleted file mode 100644
index 970e94c0d..000000000
--- a/vendor/github.com/ncw/swift/travis_realserver.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-set -e
-
-if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then
-  exit 0
-fi
-
-if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
-  echo "Running tests pointing to Rackspace"
-  export SWIFT_API_KEY=$RACKSPACE_APIKEY
-  export SWIFT_API_USER=$RACKSPACE_USER
-  export SWIFT_AUTH_URL=$RACKSPACE_AUTH
-  go test ./...
-fi
-
-if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
-  echo "Running tests pointing to Memset"
-  export SWIFT_API_KEY=$MEMSET_APIKEY
-  export SWIFT_API_USER=$MEMSET_USER
-  export SWIFT_AUTH_URL=$MEMSET_AUTH
-  go test
-fi
diff --git a/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/ncw/swift/watchdog_reader.go
deleted file mode 100644
index 2714c9e1a..000000000
--- a/vendor/github.com/ncw/swift/watchdog_reader.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package swift
-
-import (
-	"io"
-	"time"
-)
-
-var watchdogChunkSize = 1 << 20 // 1 MiB
-
-// An io.Reader which resets a watchdog timer whenever data is read
-type watchdogReader struct {
-	timeout   time.Duration
-	reader    io.Reader
-	timer     *time.Timer
-	chunkSize int
-}
-
-// Returns a new reader which will kick the watchdog timer whenever data is read
-func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
-	return &watchdogReader{
-		timeout:   timeout,
-		reader:    reader,
-		timer:     timer,
-		chunkSize: watchdogChunkSize,
-	}
-}
-
-// Read reads up to len(p) bytes into p
-func (t *watchdogReader) Read(p []byte) (int, error) {
-	// read from the underlying reader in chunks not larger than t.chunkSize
-	// while resetting the watchdog timer before every read; the small chunk
-	// size ensures that the timer does not fire when reading a large amount of
-	// data from a slow connection
-	start := 0
-	end := len(p)
-	for start < end {
-		length := end - start
-		if length > t.chunkSize {
-			length = t.chunkSize
-		}
-
-		resetTimer(t.timer, t.timeout)
-		n, err := t.reader.Read(p[start : start+length])
-		start += n
-		if n == 0 || err != nil {
-			return start, err
-		}
-	}
-
-	resetTimer(t.timer, t.timeout)
-	return start, nil
-}
-
-// Check it satisfies the interface
-var _ io.Reader = &watchdogReader{}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
deleted file mode 100644
index daf913b1b..000000000
--- a/vendor/github.com/pkg/errors/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
deleted file mode 100644
index d4b92663b..000000000
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go_import_path: github.com/pkg/errors
-go:
-  - 1.4.x
-  - 1.5.x
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
-  - tip
-
-script:
-  - go test -v ./...
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
deleted file mode 100644
index 835ba3e75..000000000
--- a/vendor/github.com/pkg/errors/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2015, Dave Cheney <dave@cheney.net>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 6483ba2af..000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. 
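A compact end-to-end sketch of the workflow described above (the file path is a placeholder, and the exact stack-trace output depends on your build):

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func readConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Wrap records a stack trace at this call site.
		return errors.Wrap(err, "read config failed")
	}
	defer f.Close()
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	fmt.Printf("%v\n", err)        // annotated message chain
	fmt.Printf("%+v\n", err)       // messages plus stack trace
	fmt.Println(errors.Cause(err)) // the underlying *os.PathError
}
```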
- -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade0..000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 7421f326f..000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. 
-// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. 
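The practical difference between these constructors is which annotations they attach; a small sketch (errNotFound and lookup are illustrative names, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // stack recorded here, at init

func lookup(key string) error {
	if key == "" {
		// WithMessage annotates without recording another stack trace.
		return errors.WithMessage(errNotFound, "empty key")
	}
	// Wrap annotates and records a stack trace at this call site.
	return errors.Wrap(errNotFound, "lookup failed")
}

func main() {
	err := lookup("")
	fmt.Println(err)                // empty key: not found
	fmt.Println(errors.Cause(err)) // not found
}
```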
-func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 2874a048c..000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
-type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml deleted file mode 100644 index 1c9e6bb6b..000000000 --- a/vendor/github.com/pkg/profile/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/profile -go: - - 1.10.x - - 1.12.x - - tip - -script: - - go test github.com/pkg/profile - - go test -race github.com/pkg/profile diff --git a/vendor/github.com/pkg/profile/AUTHORS b/vendor/github.com/pkg/profile/AUTHORS deleted file mode 100644 index 00441d354..000000000 --- a/vendor/github.com/pkg/profile/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Dave Cheney diff --git a/vendor/github.com/pkg/profile/LICENSE b/vendor/github.com/pkg/profile/LICENSE deleted file mode 100644 index f747a8411..000000000 --- a/vendor/github.com/pkg/profile/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Dave Cheney. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/profile/README.md b/vendor/github.com/pkg/profile/README.md deleted file mode 100644 index 37bfa58c5..000000000 --- a/vendor/github.com/pkg/profile/README.md +++ /dev/null @@ -1,54 +0,0 @@ -profile -======= - -Simple profiling support package for Go - -[![Build Status](https://travis-ci.org/pkg/profile.svg?branch=master)](https://travis-ci.org/pkg/profile) [![GoDoc](http://godoc.org/github.com/pkg/profile?status.svg)](http://godoc.org/github.com/pkg/profile) - - -installation ------------- - - go get github.com/pkg/profile - -usage ------ - -Enabling profiling in your application is as simple as one line at the top of your main function - -```go -import "github.com/pkg/profile" - -func main() { - defer profile.Start().Stop() - ... -} -``` - -options -------- - -What to profile is controlled by config value passed to profile.Start. -By default CPU profiling is enabled. - -```go -import "github.com/pkg/profile" - -func main() { - // p.Stop() must be called before the program exits to - // ensure profiling information is written to disk. - p := profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook) - ... -} -``` - -Several convenience package level values are provided for cpu, memory, and block (contention) profiling. - -For more complex options, consult the [documentation](http://godoc.org/github.com/pkg/profile). - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a change, please discuss it first by raising an issue. diff --git a/vendor/github.com/pkg/profile/mutex.go b/vendor/github.com/pkg/profile/mutex.go deleted file mode 100644 index e69c5b44d..000000000 --- a/vendor/github.com/pkg/profile/mutex.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.8 - -package profile - -import "runtime" - -func enableMutexProfile() { - runtime.SetMutexProfileFraction(1) -} - -func disableMutexProfile() { - runtime.SetMutexProfileFraction(0) -} diff --git a/vendor/github.com/pkg/profile/mutex17.go b/vendor/github.com/pkg/profile/mutex17.go deleted file mode 100644 index b004c21d5..000000000 --- a/vendor/github.com/pkg/profile/mutex17.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.8 - -package profile - -// mock mutex support for Go 1.7 and earlier. - -func enableMutexProfile() {} - -func disableMutexProfile() {} diff --git a/vendor/github.com/pkg/profile/profile.go b/vendor/github.com/pkg/profile/profile.go deleted file mode 100644 index 20e285427..000000000 --- a/vendor/github.com/pkg/profile/profile.go +++ /dev/null @@ -1,263 +0,0 @@ -// Package profile provides a simple way to manage runtime/pprof -// profiling of your Go application. -package profile - -import ( - "io/ioutil" - "log" - "os" - "os/signal" - "path/filepath" - "runtime" - "runtime/pprof" - "sync/atomic" -) - -const ( - cpuMode = iota - memMode - mutexMode - blockMode - traceMode - threadCreateMode -) - -// Profile represents an active profiling session. 
-type Profile struct {
-	// quiet suppresses informational messages during profiling.
-	quiet bool
-
-	// noShutdownHook controls whether the profiling package should
-	// hook SIGINT to write profiles cleanly.
-	noShutdownHook bool
-
-	// mode holds the type of profiling that will be made
-	mode int
-
-	// path holds the base path where various profiling files are written.
-	// If blank, the base path will be generated by ioutil.TempDir.
-	path string
-
-	// memProfileRate holds the rate for the memory profile.
-	memProfileRate int
-
-	// closer holds a cleanup function that is run after each profile
-	closer func()
-
-	// stopped records if a call to profile.Stop has been made
-	stopped uint32
-}
-
-// NoShutdownHook controls whether the profiling package should
-// hook SIGINT to write profiles cleanly.
-// Programs with more sophisticated signal handling should set
-// this to true and ensure the Stop() function returned from Start()
-// is called during shutdown.
-func NoShutdownHook(p *Profile) { p.noShutdownHook = true }
-
-// Quiet suppresses informational messages during profiling.
-func Quiet(p *Profile) { p.quiet = true }
-
-// CPUProfile enables cpu profiling.
-// It disables any previous profiling settings.
-func CPUProfile(p *Profile) { p.mode = cpuMode }
-
-// DefaultMemProfileRate is the default memory profiling rate.
-// See also http://golang.org/pkg/runtime/#pkg-variables
-const DefaultMemProfileRate = 4096
-
-// MemProfile enables memory profiling.
-// It disables any previous profiling settings.
-func MemProfile(p *Profile) {
-	p.memProfileRate = DefaultMemProfileRate
-	p.mode = memMode
-}
-
-// MemProfileRate enables memory profiling at the preferred rate.
-// It disables any previous profiling settings.
-func MemProfileRate(rate int) func(*Profile) {
-	return func(p *Profile) {
-		p.memProfileRate = rate
-		p.mode = memMode
-	}
-}
-
-// MutexProfile enables mutex profiling.
-// It disables any previous profiling settings.
-func MutexProfile(p *Profile) { p.mode = mutexMode }
-
-// BlockProfile enables block (contention) profiling.
-// It disables any previous profiling settings.
-func BlockProfile(p *Profile) { p.mode = blockMode }
-
-// TraceProfile enables execution tracing.
-// It disables any previous profiling settings.
-func TraceProfile(p *Profile) { p.mode = traceMode }
-
-// ThreadcreationProfile enables thread creation profiling.
-// It disables any previous profiling settings.
-func ThreadcreationProfile(p *Profile) { p.mode = threadCreateMode }
-
-// ProfilePath controls the base path where various profiling
-// files are written. If blank, the base path will be generated
-// by ioutil.TempDir.
-func ProfilePath(path string) func(*Profile) {
-	return func(p *Profile) {
-		p.path = path
-	}
-}
-
-// Stop stops the profile and flushes any unwritten data.
-func (p *Profile) Stop() {
-	if !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) {
-		// someone has already called close
-		return
-	}
-	p.closer()
-	atomic.StoreUint32(&started, 0)
-}
-
-// started is non zero if a profile is running.
-var started uint32
-
-// Start starts a new profiling session.
-// The caller should call the Stop method on the value returned
-// to cleanly stop profiling.
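The NoShutdownHook option above implies the pattern sketched below: the program takes responsibility for calling Stop itself. A minimal sketch; the signal set is illustrative:

```go
package main

import (
	"os"
	"os/signal"

	"github.com/pkg/profile"
)

func main() {
	// We handle signals ourselves, so disable the package's SIGINT hook.
	p := profile.Start(profile.CPUProfile, profile.NoShutdownHook)

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c

	// Flush the profile before exiting; otherwise the file is incomplete.
	p.Stop()
}
```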
-func Start(options ...func(*Profile)) interface { - Stop() -} { - if !atomic.CompareAndSwapUint32(&started, 0, 1) { - log.Fatal("profile: Start() already called") - } - - var prof Profile - for _, option := range options { - option(&prof) - } - - path, err := func() (string, error) { - if p := prof.path; p != "" { - return p, os.MkdirAll(p, 0777) - } - return ioutil.TempDir("", "profile") - }() - - if err != nil { - log.Fatalf("profile: could not create initial output directory: %v", err) - } - - logf := func(format string, args ...interface{}) { - if !prof.quiet { - log.Printf(format, args...) - } - } - - switch prof.mode { - case cpuMode: - fn := filepath.Join(path, "cpu.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create cpu profile %q: %v", fn, err) - } - logf("profile: cpu profiling enabled, %s", fn) - pprof.StartCPUProfile(f) - prof.closer = func() { - pprof.StopCPUProfile() - f.Close() - logf("profile: cpu profiling disabled, %s", fn) - } - - case memMode: - fn := filepath.Join(path, "mem.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create memory profile %q: %v", fn, err) - } - old := runtime.MemProfileRate - runtime.MemProfileRate = prof.memProfileRate - logf("profile: memory profiling enabled (rate %d), %s", runtime.MemProfileRate, fn) - prof.closer = func() { - pprof.Lookup("heap").WriteTo(f, 0) - f.Close() - runtime.MemProfileRate = old - logf("profile: memory profiling disabled, %s", fn) - } - - case mutexMode: - fn := filepath.Join(path, "mutex.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create mutex profile %q: %v", fn, err) - } - enableMutexProfile() - logf("profile: mutex profiling enabled, %s", fn) - prof.closer = func() { - if mp := pprof.Lookup("mutex"); mp != nil { - mp.WriteTo(f, 0) - } - f.Close() - disableMutexProfile() - logf("profile: mutex profiling disabled, %s", fn) - } - - case blockMode: - fn := filepath.Join(path, "block.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create block profile %q: %v", fn, err) - } - runtime.SetBlockProfileRate(1) - logf("profile: block profiling enabled, %s", fn) - prof.closer = func() { - pprof.Lookup("block").WriteTo(f, 0) - f.Close() - runtime.SetBlockProfileRate(0) - logf("profile: block profiling disabled, %s", fn) - } - - case threadCreateMode: - fn := filepath.Join(path, "threadcreation.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create thread creation profile %q: %v", fn, err) - } - logf("profile: thread creation profiling enabled, %s", fn) - prof.closer = func() { - if mp := pprof.Lookup("threadcreate"); mp != nil { - mp.WriteTo(f, 0) - } - f.Close() - logf("profile: thread creation profiling disabled, %s", fn) - } - - case traceMode: - fn := filepath.Join(path, "trace.out") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create trace output file %q: %v", fn, err) - } - if err := startTrace(f); err != nil { - log.Fatalf("profile: could not start trace: %v", err) - } - logf("profile: trace enabled, %s", fn) - prof.closer = func() { - stopTrace() - logf("profile: trace disabled, %s", fn) - } - } - - if !prof.noShutdownHook { - go func() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - <-c - - log.Println("profile: caught interrupt, stopping profiles") - prof.Stop() - - os.Exit(0) - }() - } - - return &prof -} diff --git a/vendor/github.com/pkg/profile/trace.go 
b/vendor/github.com/pkg/profile/trace.go deleted file mode 100644 index b349ed8b2..000000000 --- a/vendor/github.com/pkg/profile/trace.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build go1.7 - -package profile - -import "runtime/trace" - -var startTrace = trace.Start -var stopTrace = trace.Stop diff --git a/vendor/github.com/pkg/profile/trace16.go b/vendor/github.com/pkg/profile/trace16.go deleted file mode 100644 index 6aa6566ef..000000000 --- a/vendor/github.com/pkg/profile/trace16.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package profile - -import "io" - -// mock trace support for Go 1.6 and earlier. - -func startTrace(w io.Writer) error { return nil } -func stopTrace() {} diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore deleted file mode 100644 index e1ec837c3..000000000 --- a/vendor/github.com/pkg/sftp/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -.*.swo -.*.swp - -server_standalone/server_standalone - -examples/*/id_rsa -examples/*/id_rsa.pub diff --git a/vendor/github.com/pkg/sftp/.travis.yml b/vendor/github.com/pkg/sftp/.travis.yml deleted file mode 100644 index 44cd0e2ee..000000000 --- a/vendor/github.com/pkg/sftp/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -language: go -go_import_path: github.com/pkg/sftp - -# current and previous stable releases, plus tip -# remember to exclude previous and tip for macs below -go: - - 1.10.x - - 1.11.x - - tip - -os: - - linux - - osx - -matrix: - exclude: - - os: osx - go: 1.10.x - - os: osx - go: tip - -sudo: false - -addons: - ssh_known_hosts: - - bitbucket.org - -install: - - go get -t -v ./... - - ssh-keygen -t rsa -q -P "" -f $HOME/.ssh/id_rsa - -script: - - go test -integration -v ./... - - go test -testserver -v ./... - - go test -integration -testserver -v ./... - - go test -race -integration -v ./... - - go test -race -testserver -v ./... - - go test -race -integration -testserver -v ./... diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS deleted file mode 100644 index 5c7196ae6..000000000 --- a/vendor/github.com/pkg/sftp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -Dave Cheney -Saulius Gurklys -John Eikenberry diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE deleted file mode 100644 index b7b53921e..000000000 --- a/vendor/github.com/pkg/sftp/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2013, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md deleted file mode 100644 index 1fb700c41..000000000 --- a/vendor/github.com/pkg/sftp/README.md +++ /dev/null @@ -1,44 +0,0 @@ -sftp ----- - -The `sftp` package provides support for file system operations on remote ssh -servers using the SFTP subsystem. It also implements an SFTP server for serving -files from the filesystem. - -[![UNIX Build Status](https://travis-ci.org/pkg/sftp.svg?branch=master)](https://travis-ci.org/pkg/sftp) [![GoDoc](http://godoc.org/github.com/pkg/sftp?status.svg)](http://godoc.org/github.com/pkg/sftp) - -usage and examples ------------------- - -See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for -examples and usage. - -The basic operation of the package mirrors the facilities of the -[os](http://golang.org/pkg/os) package. - -The Walker interface for directory traversal is heavily inspired by Keith -Rarick's [fs](http://godoc.org/github.com/kr/fs) package. - -roadmap -------- - - * There is way too much duplication in the Client methods. If there was an - unmarshal(interface{}) method this would reduce a heap of the duplication. - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a large change, first please discuss your change by raising an -issue. - -For API/code bugs, please include a small, self contained code example to -reproduce the issue. For pull requests, remember test coverage. - -We try to handle issues and pull requests with a 0 open philosophy. That means -we will try to address the submission as soon as possible and will work toward -a resolution. If progress can no longer be made (eg. unreproducible bug) or -stops (eg. unresponsive submitter), we will close the bug. - -Thanks. diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go deleted file mode 100644 index 335bcc284..000000000 --- a/vendor/github.com/pkg/sftp/attrs.go +++ /dev/null @@ -1,243 +0,0 @@ -package sftp - -// ssh_FXP_ATTRS support -// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 - -import ( - "os" - "syscall" - "time" -) - -const ( - ssh_FILEXFER_ATTR_SIZE = 0x00000001 - ssh_FILEXFER_ATTR_UIDGID = 0x00000002 - ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 - ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 - ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 -) - -// fileInfo is an artificial type designed to satisfy os.FileInfo. -type fileInfo struct { - name string - size int64 - mode os.FileMode - mtime time.Time - sys interface{} -} - -// Name returns the base name of the file. -func (fi *fileInfo) Name() string { return fi.name } - -// Size returns the length in bytes for regular files; system-dependent for others. -func (fi *fileInfo) Size() int64 { return fi.size } - -// Mode returns file mode bits. -func (fi *fileInfo) Mode() os.FileMode { return fi.mode } - -// ModTime returns the last modification time of the file. 
-func (fi *fileInfo) ModTime() time.Time { return fi.mtime } - -// IsDir returns true if the file is a directory. -func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } - -func (fi *fileInfo) Sys() interface{} { return fi.sys } - -// FileStat holds the original unmarshalled values from a call to READDIR or -// *STAT. It is exported for the purposes of accessing the raw values via -// os.FileInfo.Sys(). It is also used server side to store the unmarshalled -// values for SetStat. -type FileStat struct { - Size uint64 - Mode uint32 - Mtime uint32 - Atime uint32 - UID uint32 - GID uint32 - Extended []StatExtended -} - -// StatExtended contains additional, extended information for a FileStat. -type StatExtended struct { - ExtType string - ExtData string -} - -func fileInfoFromStat(st *FileStat, name string) os.FileInfo { - fs := &fileInfo{ - name: name, - size: int64(st.Size), - mode: toFileMode(st.Mode), - mtime: time.Unix(int64(st.Mtime), 0), - sys: st, - } - return fs -} - -func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { - mtime := fi.ModTime().Unix() - atime := mtime - var flags uint32 = ssh_FILEXFER_ATTR_SIZE | - ssh_FILEXFER_ATTR_PERMISSIONS | - ssh_FILEXFER_ATTR_ACMODTIME - - fileStat := FileStat{ - Size: uint64(fi.Size()), - Mode: fromFileMode(fi.Mode()), - Mtime: uint32(mtime), - Atime: uint32(atime), - } - - // os specific file stat decoding - fileStatFromInfoOs(fi, &flags, &fileStat) - - return flags, fileStat -} - -func unmarshalAttrs(b []byte) (*FileStat, []byte) { - flags, b := unmarshalUint32(b) - return getFileStat(flags, b) -} - -func getFileStat(flags uint32, b []byte) (*FileStat, []byte) { - var fs FileStat - if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { - fs.Size, b = unmarshalUint64(b) - } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.UID, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.GID, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { - fs.Mode, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { - fs.Atime, b = unmarshalUint32(b) - fs.Mtime, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { - var count uint32 - count, b = unmarshalUint32(b) - ext := make([]StatExtended, count) - for i := uint32(0); i < count; i++ { - var typ string - var data string - typ, b = unmarshalString(b) - data, b = unmarshalString(b) - ext[i] = StatExtended{typ, data} - } - fs.Extended = ext - } - return &fs, b -} - -func marshalFileInfo(b []byte, fi os.FileInfo) []byte { - // attributes variable struct, and also variable per protocol version - // spec version 3 attributes: - // uint32 flags - // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE - // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS - // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED - // string extended_type - // string extended_data - // ... 
more extended data (extended_type - extended_data pairs), - // so that number of pairs equals extended_count - - flags, fileStat := fileStatFromInfo(fi) - - b = marshalUint32(b, flags) - if flags&ssh_FILEXFER_ATTR_SIZE != 0 { - b = marshalUint64(b, fileStat.Size) - } - if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { - b = marshalUint32(b, fileStat.UID) - b = marshalUint32(b, fileStat.GID) - } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { - b = marshalUint32(b, fileStat.Mode) - } - if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { - b = marshalUint32(b, fileStat.Atime) - b = marshalUint32(b, fileStat.Mtime) - } - - return b -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - switch mode & syscall.S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - if mode&syscall.S_ISGID != 0 { - fm |= os.ModeSetgid - } - if mode&syscall.S_ISUID != 0 { - fm |= os.ModeSetuid - } - if mode&syscall.S_ISVTX != 0 { - fm |= os.ModeSticky - } - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(0) - - if mode&os.ModeDevice != 0 { - if mode&os.ModeCharDevice != 0 { - ret |= syscall.S_IFCHR - } else { - ret |= syscall.S_IFBLK - } - } - if mode&os.ModeDir != 0 { - ret |= syscall.S_IFDIR - } - if mode&os.ModeSymlink != 0 { - ret |= syscall.S_IFLNK - } - if mode&os.ModeNamedPipe != 0 { - ret |= syscall.S_IFIFO - } - if mode&os.ModeSetgid != 0 { - ret |= syscall.S_ISGID - } - if mode&os.ModeSetuid != 0 { - ret |= syscall.S_ISUID - } - if mode&os.ModeSticky != 0 { - ret |= syscall.S_ISVTX - } - if mode&os.ModeSocket != 0 { - ret |= syscall.S_IFSOCK - } - - if mode&os.ModeType == 0 { - ret |= syscall.S_IFREG - } - ret |= uint32(mode & os.ModePerm) - - return ret -} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go deleted file mode 100644 index 81cf3eac2..000000000 --- a/vendor/github.com/pkg/sftp/attrs_stubs.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !cgo,!plan9 windows android - -package sftp - -import ( - "os" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - // todo -} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go deleted file mode 100644 index 95e7922e9..000000000 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix -// +build cgo - -package sftp - -import ( - "os" - "syscall" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - if statt, ok := fi.Sys().(*syscall.Stat_t); ok { - *flags |= ssh_FILEXFER_ATTR_UIDGID - fileStat.UID = statt.Uid - fileStat.GID = statt.Gid - } -} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go deleted file mode 100644 index ef885d801..000000000 --- a/vendor/github.com/pkg/sftp/client.go +++ /dev/null @@ -1,1283 +0,0 @@ -package sftp - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "path" - "sync/atomic" - "syscall" - "time" - - "github.com/kr/fs" - "github.com/pkg/errors" 
- "golang.org/x/crypto/ssh" -) - -// InternalInconsistency indicates the packets sent and the data queued to be -// written to the file don't match up. It is an unusual error and usually is -// caused by bad behavior server side or connection issues. The error is -// limited in scope to the call where it happened, the client object is still -// OK to use as long as the connection is still open. -var InternalInconsistency = errors.New("internal inconsistency") - -// A ClientOption is a function which applies configuration to a Client. -type ClientOption func(*Client) error - -// MaxPacketChecked sets the maximum size of the payload, measured in bytes. -// This option only accepts sizes servers should support, ie. <= 32768 bytes. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. -func MaxPacketChecked(size int) ClientOption { - return func(c *Client) error { - if size < 1 { - return errors.Errorf("size must be greater or equal to 1") - } - if size > 32768 { - return errors.Errorf("sizes larger than 32KB might not work with all servers") - } - c.maxPacket = size - return nil - } -} - -// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes. -// It accepts sizes larger than the 32768 bytes all servers should support. -// Only use a setting higher than 32768 if your application always connects to -// the same server or after sufficiently broad testing. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. -func MaxPacketUnchecked(size int) ClientOption { - return func(c *Client) error { - if size < 1 { - return errors.Errorf("size must be greater or equal to 1") - } - c.maxPacket = size - return nil - } -} - -// MaxPacket sets the maximum size of the payload, measured in bytes. -// This option only accepts sizes servers should support, ie. <= 32768 bytes. -// This is a synonym for MaxPacketChecked that provides backward compatibility. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. -func MaxPacket(size int) ClientOption { - return MaxPacketChecked(size) -} - -// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file. -// -// The default maximum concurrent requests is 64. -func MaxConcurrentRequestsPerFile(n int) ClientOption { - return func(c *Client) error { - if n < 1 { - return errors.Errorf("n must be greater or equal to 1") - } - c.maxConcurrentRequests = n - return nil - } -} - -// NewClient creates a new SFTP client on conn, using zero or more option -// functions. -func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { - s, err := conn.NewSession() - if err != nil { - return nil, err - } - if err := s.RequestSubsystem("sftp"); err != nil { - return nil, err - } - pw, err := s.StdinPipe() - if err != nil { - return nil, err - } - pr, err := s.StdoutPipe() - if err != nil { - return nil, err - } - - return NewClientPipe(pr, pw, opts...) -} - -// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. -// This can be used for connecting to an SFTP server over TCP/TLS or by using -// the system's ssh client program (e.g. via exec.Command). 
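Putting these options together; a rough sketch where the host, the credentials and the option values are placeholders (and the host key callback is deliberately insecure, for brevity only):

```go
package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func newSFTP(conn *ssh.Client) (*sftp.Client, error) {
	// A conservative packet size plus a higher per-file request ceiling;
	// both values are illustrative, not recommendations.
	return sftp.NewClient(conn,
		sftp.MaxPacket(32768),
		sftp.MaxConcurrentRequestsPerFile(128),
	)
}

func main() {
	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // never do this in production
	}
	conn, err := ssh.Dial("tcp", "host:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client, err := newSFTP(conn)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```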
-func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { - sftp := &Client{ - clientConn: clientConn{ - conn: conn{ - Reader: rd, - WriteCloser: wr, - }, - inflight: make(map[uint32]chan<- result), - closed: make(chan struct{}), - }, - maxPacket: 1 << 15, - maxConcurrentRequests: 64, - } - if err := sftp.applyOptions(opts...); err != nil { - wr.Close() - return nil, err - } - if err := sftp.sendInit(); err != nil { - wr.Close() - return nil, err - } - if err := sftp.recvVersion(); err != nil { - wr.Close() - return nil, err - } - sftp.clientConn.wg.Add(1) - go sftp.loop() - return sftp, nil -} - -// Client represents an SFTP session on a *ssh.ClientConn SSH connection. -// Multiple Clients can be active on a single SSH connection, and a Client -// may be called concurrently from multiple Goroutines. -// -// Client implements the github.com/kr/fs.FileSystem interface. -type Client struct { - clientConn - - maxPacket int // max packet size read or written. - nextid uint32 - maxConcurrentRequests int -} - -// Create creates the named file mode 0666 (before umask), truncating it if it -// already exists. If successful, methods on the returned File can be used for -// I/O; the associated file descriptor has mode O_RDWR. If you need more -// control over the flags/mode used to open the file see client.OpenFile. -func (c *Client) Create(path string) (*File, error) { - return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) -} - -const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - -func (c *Client) sendInit() error { - return c.clientConn.conn.sendPacket(sshFxInitPacket{ - Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - }) -} - -// returns the next value of c.nextid -func (c *Client) nextID() uint32 { - return atomic.AddUint32(&c.nextid, 1) -} - -func (c *Client) recvVersion() error { - typ, data, err := c.recvPacket() - if err != nil { - return err - } - if typ != ssh_FXP_VERSION { - return &unexpectedPacketErr{ssh_FXP_VERSION, typ} - } - - version, _ := unmarshalUint32(data) - if version != sftpProtocolVersion { - return &unexpectedVersionErr{sftpProtocolVersion, version} - } - - return nil -} - -// Walk returns a new Walker rooted at root. -func (c *Client) Walk(root string) *fs.Walker { - return fs.WalkFS(root, c) -} - -// ReadDir reads the directory named by dirname and returns a list of -// directory entries. -func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { - handle, err := c.opendir(p) - if err != nil { - return nil, err - } - defer c.close(handle) // this has to defer earlier than the lock below - var attrs []os.FileInfo - var done = false - for !done { - id := c.nextID() - typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{ - ID: id, - Handle: handle, - }) - if err1 != nil { - err = err1 - done = true - break - } - switch typ { - case ssh_FXP_NAME: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - for i := uint32(0); i < count; i++ { - var filename string - filename, data = unmarshalString(data) - _, data = unmarshalString(data) // discard longname - var attr *FileStat - attr, data = unmarshalAttrs(data) - if filename == "." || filename == ".." { - continue - } - attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) - } - case ssh_FXP_STATUS: - // TODO(dfc) scope warning! 
-			err = normaliseError(unmarshalStatus(id, data))
-			done = true
-		default:
-			return nil, unimplementedPacketErr(typ)
-		}
-	}
-	if err == io.EOF {
-		err = nil
-	}
-	return attrs, err
-}
-
-func (c *Client) opendir(path string) (string, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpOpendirPacket{
-		ID:   id,
-		Path: path,
-	})
-	if err != nil {
-		return "", err
-	}
-	switch typ {
-	case ssh_FXP_HANDLE:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return "", &unexpectedIDErr{id, sid}
-		}
-		handle, _ := unmarshalString(data)
-		return handle, nil
-	case ssh_FXP_STATUS:
-		return "", normaliseError(unmarshalStatus(id, data))
-	default:
-		return "", unimplementedPacketErr(typ)
-	}
-}
-
-// Stat returns a FileInfo structure describing the file specified by path 'p'.
-// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
-func (c *Client) Stat(p string) (os.FileInfo, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpStatPacket{
-		ID:   id,
-		Path: p,
-	})
-	if err != nil {
-		return nil, err
-	}
-	switch typ {
-	case ssh_FXP_ATTRS:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return nil, &unexpectedIDErr{id, sid}
-		}
-		attr, _ := unmarshalAttrs(data)
-		return fileInfoFromStat(attr, path.Base(p)), nil
-	case ssh_FXP_STATUS:
-		return nil, normaliseError(unmarshalStatus(id, data))
-	default:
-		return nil, unimplementedPacketErr(typ)
-	}
-}
-
-// Lstat returns a FileInfo structure describing the file specified by path 'p'.
-// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
-func (c *Client) Lstat(p string) (os.FileInfo, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpLstatPacket{
-		ID:   id,
-		Path: p,
-	})
-	if err != nil {
-		return nil, err
-	}
-	switch typ {
-	case ssh_FXP_ATTRS:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return nil, &unexpectedIDErr{id, sid}
-		}
-		attr, _ := unmarshalAttrs(data)
-		return fileInfoFromStat(attr, path.Base(p)), nil
-	case ssh_FXP_STATUS:
-		return nil, normaliseError(unmarshalStatus(id, data))
-	default:
-		return nil, unimplementedPacketErr(typ)
-	}
-}
-
-// ReadLink reads the target of a symbolic link.
-func (c *Client) ReadLink(p string) (string, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpReadlinkPacket{
-		ID:   id,
-		Path: p,
-	})
-	if err != nil {
-		return "", err
-	}
-	switch typ {
-	case ssh_FXP_NAME:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return "", &unexpectedIDErr{id, sid}
-		}
-		count, data := unmarshalUint32(data)
-		if count != 1 {
-			return "", unexpectedCount(1, count)
-		}
-		filename, _ := unmarshalString(data) // ignore dummy attributes
-		return filename, nil
-	case ssh_FXP_STATUS:
-		return "", normaliseError(unmarshalStatus(id, data))
-	default:
-		return "", unimplementedPacketErr(typ)
-	}
-}
-
-// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'.
-func (c *Client) Symlink(oldname, newname string) error {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpSymlinkPacket{
-		ID:         id,
-		Linkpath:   newname,
-		Targetpath: oldname,
-	})
-	if err != nil {
-		return err
-	}
-	switch typ {
-	case ssh_FXP_STATUS:
-		return normaliseError(unmarshalStatus(id, data))
-	default:
-		return unimplementedPacketErr(typ)
-	}
-}
-
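A short usage sketch of the stat and link calls above; c is an assumed, already-connected *sftp.Client, and the imports os and github.com/pkg/sftp are assumed:

// describe reports whether p is a symlink and, if so, where it points,
// pairing Lstat (which does not follow links) with ReadLink.
func describe(c *sftp.Client, p string) (string, error) {
	fi, err := c.Lstat(p)
	if err != nil {
		return "", err
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		target, err := c.ReadLink(p)
		if err != nil {
			return "", err
		}
		return p + " -> " + target, nil
	}
	return p, nil
}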
-// setstat is a convenience wrapper that changes various attributes of the
-// named file.
-func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpSetstatPacket{
-		ID:    id,
-		Path:  path,
-		Flags: flags,
-		Attrs: attrs,
-	})
-	if err != nil {
-		return err
-	}
-	switch typ {
-	case ssh_FXP_STATUS:
-		return normaliseError(unmarshalStatus(id, data))
-	default:
-		return unimplementedPacketErr(typ)
-	}
-}
-
-// Chtimes changes the access and modification times of the named file.
-func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
-	type times struct {
-		Atime uint32
-		Mtime uint32
-	}
-	attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
-	return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs)
-}
-
-// Chown changes the user and group owners of the named file.
-func (c *Client) Chown(path string, uid, gid int) error {
-	type owner struct {
-		UID uint32
-		GID uint32
-	}
-	attrs := owner{uint32(uid), uint32(gid)}
-	return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs)
-}
-
-// Chmod changes the permissions of the named file.
-func (c *Client) Chmod(path string, mode os.FileMode) error {
-	return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode))
-}
-
-// Truncate sets the size of the named file. Although it may be safely assumed
-// that if the size is less than its current size it will be truncated to fit,
-// the SFTP protocol does not specify how the server should behave when the
-// requested size is greater than the current size.
-func (c *Client) Truncate(path string, size int64) error {
-	return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size))
-}
-
-// Open opens the named file for reading. If successful, methods on the
-// returned file can be used for reading; the associated file descriptor
-// has mode O_RDONLY.
-func (c *Client) Open(path string) (*File, error) {
-	return c.open(path, flags(os.O_RDONLY))
-}
-
-// OpenFile is the generalized open call; most users will use Open or
-// Create instead. It opens the named file with specified flag (O_RDONLY
-// etc.). If successful, methods on the returned File can be used for I/O.
-func (c *Client) OpenFile(path string, f int) (*File, error) {
-	return c.open(path, flags(f))
-}
-
-func (c *Client) open(path string, pflags uint32) (*File, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpOpenPacket{
-		ID:     id,
-		Path:   path,
-		Pflags: pflags,
-	})
-	if err != nil {
-		return nil, err
-	}
-	switch typ {
-	case ssh_FXP_HANDLE:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return nil, &unexpectedIDErr{id, sid}
-		}
-		handle, _ := unmarshalString(data)
-		return &File{c: c, path: path, handle: handle}, nil
-	case ssh_FXP_STATUS:
-		return nil, normaliseError(unmarshalStatus(id, data))
-	default:
-		return nil, unimplementedPacketErr(typ)
-	}
-}
-
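The flags helper near the end of this file maps the os.O_* constants onto the SFTP pflags consumed by open above. A hedged sketch of an append-style write built on OpenFile, assuming the server honors SSH_FXF_APPEND; c is an assumed connected *sftp.Client:

// appendLine opens (creating if necessary) a remote file for appending
// and writes one line to it.
func appendLine(c *sftp.Client, path, line string) error {
	f, err := c.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write([]byte(line + "\n"))
	return err
}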
-// close closes a handle previously returned in the response
-// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
-// immediately after this request has been sent.
-func (c *Client) close(handle string) error {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpClosePacket{
-		ID:     id,
-		Handle: handle,
-	})
-	if err != nil {
-		return err
-	}
-	switch typ {
-	case ssh_FXP_STATUS:
-		return normaliseError(unmarshalStatus(id, data))
-	default:
-		return unimplementedPacketErr(typ)
-	}
-}
-
-func (c *Client) fstat(handle string) (*FileStat, error) {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpFstatPacket{
-		ID:     id,
-		Handle: handle,
-	})
-	if err != nil {
-		return nil, err
-	}
-	switch typ {
-	case ssh_FXP_ATTRS:
-		sid, data := unmarshalUint32(data)
-		if sid != id {
-			return nil, &unexpectedIDErr{id, sid}
-		}
-		attr, _ := unmarshalAttrs(data)
-		return attr, nil
-	case ssh_FXP_STATUS:
-		return nil, normaliseError(unmarshalStatus(id, data))
-	default:
-		return nil, unimplementedPacketErr(typ)
-	}
-}
-
-// StatVFS retrieves VFS statistics from a remote host.
-//
-// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
-// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
-func (c *Client) StatVFS(path string) (*StatVFS, error) {
-	// send the StatVFS packet to the server
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpStatvfsPacket{
-		ID:   id,
-		Path: path,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	switch typ {
-	// server responded with valid data
-	case ssh_FXP_EXTENDED_REPLY:
-		var response StatVFS
-		err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
-		if err != nil {
-			return nil, errors.New("cannot parse reply")
-		}
-
-		return &response, nil
-
-	// the request failed
-	case ssh_FXP_STATUS:
-		return nil, errors.New(fxp(ssh_FXP_STATUS).String())
-
-	default:
-		return nil, unimplementedPacketErr(typ)
-	}
-}
-
-// Join joins any number of path elements into a single path, adding a
-// separating slash if necessary. The result is Cleaned; in particular, all
-// empty strings are ignored.
-func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
-
-// Remove removes the specified file or directory. An error will be returned if no
-// file or directory with the specified path exists, or if the specified directory
-// is not empty.
-func (c *Client) Remove(path string) error {
-	err := c.removeFile(path)
-	if err, ok := err.(*StatusError); ok {
-		switch err.Code {
-		// some servers, *cough* osx *cough*, return EPERM, not EISDIR.
-		// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
-		case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY:
-			return c.RemoveDirectory(path)
-		}
-	}
-	return err
-}
-
-func (c *Client) removeFile(path string) error {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpRemovePacket{
-		ID:       id,
-		Filename: path,
-	})
-	if err != nil {
-		return err
-	}
-	switch typ {
-	case ssh_FXP_STATUS:
-		return normaliseError(unmarshalStatus(id, data))
-	default:
-		return unimplementedPacketErr(typ)
-	}
-}
-
-// RemoveDirectory removes a directory path.
-func (c *Client) RemoveDirectory(path string) error {
-	id := c.nextID()
-	typ, data, err := c.sendPacket(sshFxpRmdirPacket{
-		ID:   id,
-		Path: path,
-	})
-	if err != nil {
-		return err
-	}
-	switch typ {
-	case ssh_FXP_STATUS:
-		return normaliseError(unmarshalStatus(id, data))
-	default:
-		return unimplementedPacketErr(typ)
-	}
-}
-
-// Rename renames a file.
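Rename below fails on many servers when newname already exists, while the posix-rename@openssh.com extension (PosixRename, defined right after it) replaces the target atomically. A common caller-side pattern, sketched under the assumption that a failed extension call is worth retrying with the standard operation:

// move prefers the atomic POSIX rename and falls back to the standard
// SFTP rename; real code would inspect the error before falling back.
func move(c *sftp.Client, oldname, newname string) error {
	if err := c.PosixRename(oldname, newname); err == nil {
		return nil
	}
	return c.Rename(oldname, newname)
}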
-func (c *Client) Rename(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRenamePacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// PosixRename renames a file using the posix-rename@openssh.com extension -// which will replace newname if it already exists. -func (c *Client) PosixRename(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpPosixRenamePacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) realpath(path string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRealpathPacket{ - ID: id, - Path: path, - }) - if err != nil { - return "", err - } - switch typ { - case ssh_FXP_NAME: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - if count != 1 { - return "", unexpectedCount(1, count) - } - filename, _ := unmarshalString(data) // ignore attributes - return filename, nil - case ssh_FXP_STATUS: - return "", normaliseError(unmarshalStatus(id, data)) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Getwd returns the current working directory of the server. Operations -// involving relative paths will be based at this location. -func (c *Client) Getwd() (string, error) { - return c.realpath(".") -} - -// Mkdir creates the specified directory. An error will be returned if a file or -// directory with the specified path already exists, or if the directory's -// parent folder does not exist (the method cannot create complete paths). -func (c *Client) Mkdir(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpMkdirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// MkdirAll creates a directory named path, along with any necessary parents, -// and returns nil, or else returns an error. -// If path is already a directory, MkdirAll does nothing and returns nil. -// If path contains a regular file, an error is returned -func (c *Client) MkdirAll(path string) error { - // Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13 - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := c.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = c.MkdirAll(path[0 : j-1]) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = c.Mkdir(path) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. 
- dir, err1 := c.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// applyOptions applies options functions to the Client. -// If an error is encountered, option processing ceases. -func (c *Client) applyOptions(opts ...ClientOption) error { - for _, f := range opts { - if err := f(c); err != nil { - return err - } - } - return nil -} - -// File represents a remote file. -type File struct { - c *Client - path string - handle string - offset uint64 // current offset within remote file -} - -// Close closes the File, rendering it unusable for I/O. It returns an -// error, if any. -func (f *File) Close() error { - return f.c.close(f.handle) -} - -// Name returns the name of the file as presented to Open or Create. -func (f *File) Name() string { - return f.path -} - -// Read reads up to len(b) bytes from the File. It returns the number of bytes -// read and an error, if any. Read follows io.Reader semantics, so when Read -// encounters an error or EOF condition after successfully reading n > 0 bytes, -// it returns the number of bytes read. -// -// To maximise throughput for transferring the entire file (especially -// over high latency links) it is recommended to use WriteTo rather -// than calling Read multiple times. io.Copy will do this -// automatically. -func (f *File) Read(b []byte) (int, error) { - // Split the read into multiple maxPacket sized concurrent reads - // bounded by maxConcurrentRequests. This allows reads with a suitably - // large buffer to transfer data at a much faster rate due to - // overlapping round trip times. - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - // maxConcurrentRequests buffer to deal with broadcastErr() floods - // also must have a buffer of max value of (desiredInFlight - inFlight) - ch := make(chan result, f.c.maxConcurrentRequests+1) - type inflightRead struct { - b []byte - offset uint64 - } - reqs := map[uint32]inflightRead{} - type offsetErr struct { - offset uint64 - err error - } - var firstErr offsetErr - - sendReq := func(b []byte, offset uint64) { - reqID := f.c.nextID() - f.c.dispatchRequest(ch, sshFxpReadPacket{ - ID: reqID, - Handle: f.handle, - Offset: offset, - Len: uint32(len(b)), - }) - inFlight++ - reqs[reqID] = inflightRead{b: b, offset: offset} - } - - var read int - for len(b) > 0 || inFlight > 0 { - for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil { - l := min(len(b), f.c.maxPacket) - rb := b[:l] - sendReq(rb, offset) - offset += uint64(l) - b = b[l:] - } - - if inFlight == 0 { - break - } - res := <-ch - inFlight-- - if res.err != nil { - firstErr = offsetErr{offset: 0, err: res.err} - continue - } - reqID, data := unmarshalUint32(res.data) - req, ok := reqs[reqID] - if !ok { - firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} - continue - } - delete(reqs, reqID) - switch res.typ { - case ssh_FXP_STATUS: - if firstErr.err == nil || req.offset < firstErr.offset { - firstErr = offsetErr{ - offset: req.offset, - err: normaliseError(unmarshalStatus(reqID, res.data)), - } - } - case ssh_FXP_DATA: - l, data := unmarshalUint32(data) - n := copy(req.b, data[:l]) - read += n - if n < len(req.b) { - sendReq(req.b[l:], req.offset+uint64(l)) - } - if desiredInFlight < f.c.maxConcurrentRequests { - desiredInFlight++ - } - default: - firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} - } - } - // If the error is anything other than EOF, then there - // may be gaps in the data copied to the buffer so it's - // 
best to return 0 so the caller can't make any - // incorrect assumptions about the state of the buffer. - if firstErr.err != nil && firstErr.err != io.EOF { - read = 0 - } - f.offset += uint64(read) - return read, firstErr.err -} - -// WriteTo writes the file to w. The return value is the number of bytes -// written. Any error encountered during the write is also returned. -// -// This method is preferred over calling Read multiple times to -// maximise throughput for transferring the entire file (especially -// over high latency links). -func (f *File) WriteTo(w io.Writer) (int64, error) { - fi, err := f.Stat() - if err != nil { - return 0, err - } - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - writeOffset := offset - fileSize := uint64(fi.Size()) - // see comment on same line in Read() above - ch := make(chan result, f.c.maxConcurrentRequests+1) - type inflightRead struct { - b []byte - offset uint64 - } - reqs := map[uint32]inflightRead{} - pendingWrites := map[uint64][]byte{} - type offsetErr struct { - offset uint64 - err error - } - var firstErr offsetErr - - sendReq := func(b []byte, offset uint64) { - reqID := f.c.nextID() - f.c.dispatchRequest(ch, sshFxpReadPacket{ - ID: reqID, - Handle: f.handle, - Offset: offset, - Len: uint32(len(b)), - }) - inFlight++ - reqs[reqID] = inflightRead{b: b, offset: offset} - } - - var copied int64 - for firstErr.err == nil || inFlight > 0 { - if firstErr.err == nil { - for inFlight+len(pendingWrites) < desiredInFlight { - b := make([]byte, f.c.maxPacket) - sendReq(b, offset) - offset += uint64(f.c.maxPacket) - if offset > fileSize { - desiredInFlight = 1 - } - } - } - - if inFlight == 0 { - if firstErr.err == nil && len(pendingWrites) > 0 { - return copied, InternalInconsistency - } - break - } - res := <-ch - inFlight-- - if res.err != nil { - firstErr = offsetErr{offset: 0, err: res.err} - continue - } - reqID, data := unmarshalUint32(res.data) - req, ok := reqs[reqID] - if !ok { - firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} - continue - } - delete(reqs, reqID) - switch res.typ { - case ssh_FXP_STATUS: - if firstErr.err == nil || req.offset < firstErr.offset { - firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))} - } - case ssh_FXP_DATA: - l, data := unmarshalUint32(data) - if req.offset == writeOffset { - nbytes, err := w.Write(data) - copied += int64(nbytes) - if err != nil { - // We will never receive another DATA with offset==writeOffset, so - // the loop will drain inFlight and then exit. - firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err} - break - } - if nbytes < int(l) { - firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite} - break - } - switch { - case offset > fileSize: - desiredInFlight = 1 - case desiredInFlight < f.c.maxConcurrentRequests: - desiredInFlight++ - } - writeOffset += uint64(nbytes) - for { - pendingData, ok := pendingWrites[writeOffset] - if !ok { - break - } - // Give go a chance to free the memory. - delete(pendingWrites, writeOffset) - nbytes, err := w.Write(pendingData) - // Do not move writeOffset on error so subsequent iterations won't trigger - // any writes. 
-					if err != nil {
-						firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err}
-						break
-					}
-					if nbytes < len(pendingData) {
-						firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite}
-						break
-					}
-					writeOffset += uint64(nbytes)
-				}
-			} else {
-				// Don't write the data yet because
-				// this response came in out of order
-				// and we need to wait for responses
-				// for earlier segments of the file.
-				pendingWrites[req.offset] = data
-			}
-		default:
-			firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
-		}
-	}
-	if firstErr.err != io.EOF {
-		return copied, firstErr.err
-	}
-	return copied, nil
-}
-
-// Stat returns the FileInfo structure describing the file. It returns an
-// error, if any.
-func (f *File) Stat() (os.FileInfo, error) {
-	fs, err := f.c.fstat(f.handle)
-	if err != nil {
-		return nil, err
-	}
-	return fileInfoFromStat(fs, path.Base(f.path)), nil
-}
-
-// Write writes len(b) bytes to the File. It returns the number of bytes
-// written and an error, if any. Write returns a non-nil error when n !=
-// len(b).
-//
-// To maximise throughput for transferring the entire file (especially
-// over high latency links) it is recommended to use ReadFrom rather
-// than calling Write multiple times. io.Copy will do this
-// automatically.
-func (f *File) Write(b []byte) (int, error) {
-	// Split the write into multiple maxPacket sized concurrent writes
-	// bounded by maxConcurrentRequests. This allows writes with a suitably
-	// large buffer to transfer data at a much faster rate due to
-	// overlapping round trip times.
-	inFlight := 0
-	desiredInFlight := 1
-	offset := f.offset
-	// see comment on same line in Read() above
-	ch := make(chan result, f.c.maxConcurrentRequests+1)
-	var firstErr error
-	written := len(b)
-	for len(b) > 0 || inFlight > 0 {
-		for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil {
-			l := min(len(b), f.c.maxPacket)
-			rb := b[:l]
-			f.c.dispatchRequest(ch, sshFxpWritePacket{
-				ID:     f.c.nextID(),
-				Handle: f.handle,
-				Offset: offset,
-				Length: uint32(len(rb)),
-				Data:   rb,
-			})
-			inFlight++
-			offset += uint64(l)
-			b = b[l:]
-		}
-
-		if inFlight == 0 {
-			break
-		}
-		res := <-ch
-		inFlight--
-		if res.err != nil {
-			firstErr = res.err
-			continue
-		}
-		switch res.typ {
-		case ssh_FXP_STATUS:
-			id, _ := unmarshalUint32(res.data)
-			err := normaliseError(unmarshalStatus(id, res.data))
-			if err != nil && firstErr == nil {
-				firstErr = err
-				break
-			}
-			if desiredInFlight < f.c.maxConcurrentRequests {
-				desiredInFlight++
-			}
-		default:
-			firstErr = unimplementedPacketErr(res.typ)
-		}
-	}
-	// If error is non-nil, then there may be gaps in the data written to
-	// the file so it's best to return 0 so the caller can't make any
-	// incorrect assumptions about the state of the file.
-	if firstErr != nil {
-		written = 0
-	}
-	f.offset += uint64(written)
-	return written, firstErr
-}
-
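Write above and ReadFrom below are the two upload paths; io.Copy picks ReadFrom automatically, which pipelines write packets as the doc comments describe. A sketch, with placeholder paths and assumed imports io, os, and github.com/pkg/sftp:

// upload streams a local file to the remote side; io.Copy selects the
// File.ReadFrom fast path because *sftp.File implements io.ReaderFrom.
func upload(c *sftp.Client, local, remote string) (int64, error) {
	src, err := os.Open(local)
	if err != nil {
		return 0, err
	}
	defer src.Close()

	dst, err := c.Create(remote)
	if err != nil {
		return 0, err
	}
	defer dst.Close()

	return io.Copy(dst, src)
}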
-// ReadFrom reads data from r until EOF and writes it to the file. The return
-// value is the number of bytes read. Any error except io.EOF encountered
-// during the read is also returned.
-//
-// This method is preferred over calling Write multiple times to
-// maximise throughput for transferring the entire file (especially
-// over high latency links).
-func (f *File) ReadFrom(r io.Reader) (int64, error) {
-	inFlight := 0
-	desiredInFlight := 1
-	offset := f.offset
-	// see comment on same line in Read() above
-	ch := make(chan result, f.c.maxConcurrentRequests+1)
-	var firstErr error
-	read := int64(0)
-	b := make([]byte, f.c.maxPacket)
-	for inFlight > 0 || firstErr == nil {
-		for inFlight < desiredInFlight && firstErr == nil {
-			n, err := r.Read(b)
-			if err != nil {
-				firstErr = err
-			}
-			f.c.dispatchRequest(ch, sshFxpWritePacket{
-				ID:     f.c.nextID(),
-				Handle: f.handle,
-				Offset: offset,
-				Length: uint32(n),
-				Data:   b[:n],
-			})
-			inFlight++
-			offset += uint64(n)
-			read += int64(n)
-		}
-
-		if inFlight == 0 {
-			break
-		}
-		res := <-ch
-		inFlight--
-		if res.err != nil {
-			firstErr = res.err
-			continue
-		}
-		switch res.typ {
-		case ssh_FXP_STATUS:
-			id, _ := unmarshalUint32(res.data)
-			err := normaliseError(unmarshalStatus(id, res.data))
-			if err != nil && firstErr == nil {
-				firstErr = err
-				break
-			}
-			if desiredInFlight < f.c.maxConcurrentRequests {
-				desiredInFlight++
-			}
-		default:
-			firstErr = unimplementedPacketErr(res.typ)
-		}
-	}
-	if firstErr == io.EOF {
-		firstErr = nil
-	}
-	// If error is non-nil, then there may be gaps in the data written to
-	// the file so it's best to return 0 so the caller can't make any
-	// incorrect assumptions about the state of the file.
-	if firstErr != nil {
-		read = 0
-	}
-	f.offset += uint64(read)
-	return read, firstErr
-}
-
-// Seek implements io.Seeker by setting the client offset for the next Read or
-// Write. It returns the new offset relative to the start of the file. Seeking
-// before the start or beyond the end of the file is undefined. Seeking
-// relative to the end calls Stat.
-func (f *File) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	case io.SeekStart:
-		f.offset = uint64(offset)
-	case io.SeekCurrent:
-		f.offset = uint64(int64(f.offset) + offset)
-	case io.SeekEnd:
-		fi, err := f.Stat()
-		if err != nil {
-			return int64(f.offset), err
-		}
-		f.offset = uint64(fi.Size() + offset)
-	default:
-		return int64(f.offset), unimplementedSeekWhence(whence)
-	}
-	return int64(f.offset), nil
-}
-
-// Chown changes the uid/gid of the current file.
-func (f *File) Chown(uid, gid int) error {
-	return f.c.Chown(f.path, uid, gid)
-}
-
-// Chmod changes the permissions of the current file.
-func (f *File) Chmod(mode os.FileMode) error {
-	return f.c.Chmod(f.path, mode)
-}
-
-// Truncate sets the size of the current file. Although it may be safely assumed
-// that if the size is less than its current size it will be truncated to fit,
-// the SFTP protocol does not specify how the server should behave when the
-// requested size is greater than the current size.
-func (f *File) Truncate(size int64) error {
-	return f.c.Truncate(f.path, size)
-}
-
-func min(a, b int) int {
-	if a > b {
-		return b
-	}
-	return a
-}
-
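Because the normalisation implemented just below returns the stdlib sentinel values themselves, callers can compare directly. A sketch, with c an assumed connected *sftp.Client:

// exists distinguishes "file is absent" from transport or permission
// failures by testing the normalised error against os.ErrNotExist.
func exists(c *sftp.Client, path string) (bool, error) {
	_, err := c.Stat(path)
	if err == nil {
		return true, nil
	}
	if err == os.ErrNotExist {
		return false, nil
	}
	return false, err
}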
-// normaliseError normalises an error into a more standard form that can be
-// checked against stdlib errors like io.EOF or os.ErrNotExist.
-func normaliseError(err error) error {
-	switch err := err.(type) {
-	case *StatusError:
-		switch err.Code {
-		case ssh_FX_EOF:
-			return io.EOF
-		case ssh_FX_NO_SUCH_FILE:
-			return os.ErrNotExist
-		case ssh_FX_OK:
-			return nil
-		default:
-			return err
-		}
-	default:
-		return err
-	}
-}
-
-func unmarshalStatus(id uint32, data []byte) error {
-	sid, data := unmarshalUint32(data)
-	if sid != id {
-		return &unexpectedIDErr{id, sid}
-	}
-	code, data := unmarshalUint32(data)
-	msg, data, _ := unmarshalStringSafe(data)
-	lang, _, _ := unmarshalStringSafe(data)
-	return &StatusError{
-		Code: code,
-		msg:  msg,
-		lang: lang,
-	}
-}
-
-func marshalStatus(b []byte, err StatusError) []byte {
-	b = marshalUint32(b, err.Code)
-	b = marshalString(b, err.msg)
-	b = marshalString(b, err.lang)
-	return b
-}
-
-// flags converts the flags passed to OpenFile into ssh flags.
-// Unsupported flags are ignored.
-func flags(f int) uint32 {
-	var out uint32
-	switch f & os.O_WRONLY {
-	case os.O_WRONLY:
-		out |= ssh_FXF_WRITE
-	case os.O_RDONLY:
-		out |= ssh_FXF_READ
-	}
-	if f&os.O_RDWR == os.O_RDWR {
-		out |= ssh_FXF_READ | ssh_FXF_WRITE
-	}
-	if f&os.O_APPEND == os.O_APPEND {
-		out |= ssh_FXF_APPEND
-	}
-	if f&os.O_CREATE == os.O_CREATE {
-		out |= ssh_FXF_CREAT
-	}
-	if f&os.O_TRUNC == os.O_TRUNC {
-		out |= ssh_FXF_TRUNC
-	}
-	if f&os.O_EXCL == os.O_EXCL {
-		out |= ssh_FXF_EXCL
-	}
-	return out
-}
diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go
deleted file mode 100644
index 62e585e82..000000000
--- a/vendor/github.com/pkg/sftp/conn.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package sftp
-
-import (
-	"encoding"
-	"io"
-	"sync"
-
-	"github.com/pkg/errors"
-)
-
-// conn implements a bidirectional channel on which client and server
-// connections are multiplexed.
-type conn struct {
-	io.Reader
-	io.WriteCloser
-	sync.Mutex // used to serialise writes to sendPacket
-	// sendPacketTest is needed to replicate packet issues in testing
-	sendPacketTest func(w io.Writer, m encoding.BinaryMarshaler) error
-}
-
-func (c *conn) recvPacket() (uint8, []byte, error) {
-	return recvPacket(c)
-}
-
-func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
-	c.Lock()
-	defer c.Unlock()
-	if c.sendPacketTest != nil {
-		return c.sendPacketTest(c, m)
-	}
-	return sendPacket(c, m)
-}
-
-type clientConn struct {
-	conn
-	wg         sync.WaitGroup
-	sync.Mutex                          // protects inflight
-	inflight   map[uint32]chan<- result // outstanding requests
-
-	closed chan struct{}
-	err    error
-}
-
-// Wait blocks until the conn has shut down, and returns the error
-// causing the shutdown. It can be called concurrently from multiple
-// goroutines.
-func (c *clientConn) Wait() error {
-	<-c.closed
-	return c.err
-}
-
-// Close closes the SFTP session.
-func (c *clientConn) Close() error {
-	defer c.wg.Wait()
-	return c.conn.Close()
-}
-
-func (c *clientConn) loop() {
-	defer c.wg.Done()
-	err := c.recv()
-	if err != nil {
-		c.broadcastErr(err)
-	}
-}
-
-// recv continuously reads from the server and forwards responses to the
-// appropriate channel.
-func (c *clientConn) recv() error {
-	defer func() {
-		c.conn.Lock()
-		c.conn.Close()
-		c.conn.Unlock()
-	}()
-	for {
-		typ, data, err := c.recvPacket()
-		if err != nil {
-			return err
-		}
-		sid, _ := unmarshalUint32(data)
-		c.Lock()
-		ch, ok := c.inflight[sid]
-		delete(c.inflight, sid)
-		c.Unlock()
-		if !ok {
-			// This is an unexpected occurrence. Send the error
-			// back to all listeners so that they terminate
-			// gracefully.
- return errors.Errorf("sid: %v not fond", sid) - } - ch <- result{typ: typ, data: data} - } -} - -// result captures the result of receiving the a packet from the server -type result struct { - typ byte - data []byte - err error -} - -type idmarshaler interface { - id() uint32 - encoding.BinaryMarshaler -} - -func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) { - ch := make(chan result, 2) - c.dispatchRequest(ch, p) - s := <-ch - return s.typ, s.data, s.err -} - -func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) { - c.Lock() - c.inflight[p.id()] = ch - c.Unlock() - if err := c.conn.sendPacket(p); err != nil { - c.Lock() - delete(c.inflight, p.id()) - c.Unlock() - ch <- result{err: err} - } -} - -// broadcastErr sends an error to all goroutines waiting for a response. -func (c *clientConn) broadcastErr(err error) { - c.Lock() - listeners := make([]chan<- result, 0, len(c.inflight)) - for _, ch := range c.inflight { - listeners = append(listeners, ch) - } - c.Unlock() - for _, ch := range listeners { - ch <- result{err: err} - } - c.err = err - close(c.closed) -} - -type serverConn struct { - conn -} - -func (s *serverConn) sendError(p ider, err error) error { - return s.sendPacket(statusFromError(p, err)) -} diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go deleted file mode 100644 index 3e264abe3..000000000 --- a/vendor/github.com/pkg/sftp/debug.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build debug - -package sftp - -import "log" - -func debug(fmt string, args ...interface{}) { - log.Printf(fmt, args...) -} diff --git a/vendor/github.com/pkg/sftp/match.go b/vendor/github.com/pkg/sftp/match.go deleted file mode 100644 index e2f2ba409..000000000 --- a/vendor/github.com/pkg/sftp/match.go +++ /dev/null @@ -1,295 +0,0 @@ -package sftp - -import ( - "path" - "strings" - "unicode/utf8" -) - -// ErrBadPattern indicates a globbing pattern was malformed. -var ErrBadPattern = path.ErrBadPattern - -// Unix separator -const separator = "/" - -// Match reports whether name matches the shell file name pattern. -// The pattern syntax is: -// -// pattern: -// { term } -// term: -// '*' matches any sequence of non-Separator characters -// '?' matches any single non-Separator character -// '[' [ '^' ] { character-range } ']' -// character class (must be non-empty) -// c matches character c (c != '*', '?', '\\', '[') -// '\\' c matches character c -// -// character-range: -// c matches character c (c != '\\', '-', ']') -// '\\' c matches character c -// lo '-' hi matches character c for lo <= c <= hi -// -// Match requires pattern to match all of name, not just a substring. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// -func Match(pattern, name string) (matched bool, err error) { - return path.Match(pattern, name) -} - -// detect if byte(char) is path separator -func isPathSeparator(c byte) bool { - return string(c) == "/" -} - -// scanChunk gets the next segment of pattern, which is a non-star string -// possibly preceded by a star. -func scanChunk(pattern string) (star bool, chunk, rest string) { - for len(pattern) > 0 && pattern[0] == '*' { - pattern = pattern[1:] - star = true - } - inrange := false - var i int -Scan: - for i = 0; i < len(pattern); i++ { - switch pattern[i] { - case '\\': - - // error check handled in matchChunk: bad pattern. 
- if i+1 < len(pattern) { - i++ - } - case '[': - inrange = true - case ']': - inrange = false - case '*': - if !inrange { - break Scan - } - } - } - return star, pattern[0:i], pattern[i:] -} - -// matchChunk checks whether chunk matches the beginning of s. -// If so, it returns the remainder of s (after the match). -// Chunk is all single-character operators: literals, char classes, and ?. -func matchChunk(chunk, s string) (rest string, ok bool, err error) { - for len(chunk) > 0 { - if len(s) == 0 { - return - } - switch chunk[0] { - case '[': - // character class - r, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - // We can't end right after '[', we're expecting at least - // a closing bracket and possibly a caret. - if len(chunk) == 0 { - err = ErrBadPattern - return - } - // possibly negated - negated := chunk[0] == '^' - if negated { - chunk = chunk[1:] - } - // parse all ranges - match := false - nrange := 0 - for { - if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { - chunk = chunk[1:] - break - } - var lo, hi rune - if lo, chunk, err = getEsc(chunk); err != nil { - return - } - hi = lo - if chunk[0] == '-' { - if hi, chunk, err = getEsc(chunk[1:]); err != nil { - return - } - } - if lo <= r && r <= hi { - match = true - } - nrange++ - } - if match == negated { - return - } - - case '?': - if isPathSeparator(s[0]) { - return - } - _, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - - case '\\': - chunk = chunk[1:] - if len(chunk) == 0 { - err = ErrBadPattern - return - } - fallthrough - - default: - if chunk[0] != s[0] { - return - } - s = s[1:] - chunk = chunk[1:] - } - } - return s, true, nil -} - -// getEsc gets a possibly-escaped character from chunk, for a character class. -func getEsc(chunk string) (r rune, nchunk string, err error) { - if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { - err = ErrBadPattern - return - } - if chunk[0] == '\\' { - chunk = chunk[1:] - if len(chunk) == 0 { - err = ErrBadPattern - return - } - } - r, n := utf8.DecodeRuneInString(chunk) - if r == utf8.RuneError && n == 1 { - err = ErrBadPattern - } - nchunk = chunk[n:] - if len(nchunk) == 0 { - err = ErrBadPattern - } - return -} - -// Split splits path immediately following the final Separator, -// separating it into a directory and file name component. -// If there is no Separator in path, Split returns an empty dir -// and file set to path. -// The returned values have the property that path = dir+file. -func Split(path string) (dir, file string) { - i := len(path) - 1 - for i >= 0 && !isPathSeparator(path[i]) { - i-- - } - return path[:i+1], path[i+1:] -} - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -func (c *Client) Glob(pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - file, err := c.Lstat(pattern) - if err != nil { - return nil, nil - } - dir, _ := Split(pattern) - dir = cleanGlobPath(dir) - return []string{Join(dir, file.Name())}, nil - } - - dir, file := Split(pattern) - dir = cleanGlobPath(dir) - - if !hasMeta(dir) { - return c.glob(dir, file, nil) - } - - // Prevent infinite recursion. See issue 15879. 
-	if dir == pattern {
-		return nil, ErrBadPattern
-	}
-
-	var m []string
-	m, err = c.Glob(dir)
-	if err != nil {
-		return
-	}
-	for _, d := range m {
-		matches, err = c.glob(d, file, matches)
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// cleanGlobPath prepares path for glob matching.
-func cleanGlobPath(path string) string {
-	switch path {
-	case "":
-		return "."
-	case string(separator):
-		// do nothing to the path
-		return path
-	default:
-		return path[0 : len(path)-1] // chop off trailing separator
-	}
-}
-
-// glob searches for files matching pattern in the directory dir
-// and appends them to matches. If the directory cannot be
-// opened, it returns the existing matches. New matches are
-// added in lexicographical order.
-func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) {
-	m = matches
-	fi, err := c.Stat(dir)
-	if err != nil {
-		return
-	}
-	if !fi.IsDir() {
-		return
-	}
-	names, err := c.ReadDir(dir)
-	if err != nil {
-		return
-	}
-	//sort.Strings(names)
-
-	for _, n := range names {
-		matched, err := Match(pattern, n.Name())
-		if err != nil {
-			return m, err
-		}
-		if matched {
-			m = append(m, Join(dir, n.Name()))
-		}
-	}
-	return
-}
-
-// Join joins any number of path elements into a single path, adding
-// a Separator if necessary. All empty strings are ignored.
-func Join(elem ...string) string {
-	return path.Join(elem...)
-}
-
-// hasMeta reports whether path contains any of the magic characters
-// recognized by Match.
-func hasMeta(path string) bool {
-	// TODO(niemeyer): Should other magic characters be added here?
-	return strings.ContainsAny(path, "*?[")
-}
diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go
deleted file mode 100644
index 2f3be10a6..000000000
--- a/vendor/github.com/pkg/sftp/packet-manager.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package sftp
-
-import (
-	"encoding"
-	"sort"
-	"sync"
-)
-
-// The goal of the packetManager is to keep the outgoing packets in the same
-// order as the incoming ones, as is required by section 7 of the RFC.
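A toy model of the reordering described above: a response may only be sent once its order id matches that of the oldest outstanding request, so out-of-order completions queue up. Illustrative only; the real types and controller follow below:

type ordered struct{ order uint32 }

// sendReady flushes responses while they line up with the oldest
// incoming request, mirroring maybeSendPackets further down.
func sendReady(incoming, outgoing []ordered, send func(ordered)) ([]ordered, []ordered) {
	for len(incoming) > 0 && len(outgoing) > 0 &&
		incoming[0].order == outgoing[0].order {
		send(outgoing[0])
		incoming, outgoing = incoming[1:], outgoing[1:]
	}
	return incoming, outgoing
}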
- -type packetManager struct { - requests chan orderedPacket - responses chan orderedPacket - fini chan struct{} - incoming orderedPackets - outgoing orderedPackets - sender packetSender // connection object - working *sync.WaitGroup - packetCount uint32 -} - -type packetSender interface { - sendPacket(encoding.BinaryMarshaler) error -} - -func newPktMgr(sender packetSender) *packetManager { - s := &packetManager{ - requests: make(chan orderedPacket, SftpServerWorkerCount), - responses: make(chan orderedPacket, SftpServerWorkerCount), - fini: make(chan struct{}), - incoming: make([]orderedPacket, 0, SftpServerWorkerCount), - outgoing: make([]orderedPacket, 0, SftpServerWorkerCount), - sender: sender, - working: &sync.WaitGroup{}, - } - go s.controller() - return s -} - -//// packet ordering -func (s *packetManager) newOrderId() uint32 { - s.packetCount++ - return s.packetCount -} - -type orderedRequest struct { - requestPacket - orderid uint32 -} - -func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest { - return orderedRequest{requestPacket: p, orderid: s.newOrderId()} -} -func (p orderedRequest) orderId() uint32 { return p.orderid } -func (p orderedRequest) setOrderId(oid uint32) { p.orderid = oid } - -type orderedResponse struct { - responsePacket - orderid uint32 -} - -func (s *packetManager) newOrderedResponse(p responsePacket, id uint32, -) orderedResponse { - return orderedResponse{responsePacket: p, orderid: id} -} -func (p orderedResponse) orderId() uint32 { return p.orderid } -func (p orderedResponse) setOrderId(oid uint32) { p.orderid = oid } - -type orderedPacket interface { - id() uint32 - orderId() uint32 -} -type orderedPackets []orderedPacket - -func (o orderedPackets) Sort() { - sort.Slice(o, func(i, j int) bool { - return o[i].orderId() < o[j].orderId() - }) -} - -//// packet registry -// register incoming packets to be handled -func (s *packetManager) incomingPacket(pkt orderedRequest) { - s.working.Add(1) - s.requests <- pkt -} - -// register outgoing packets as being ready -func (s *packetManager) readyPacket(pkt orderedResponse) { - s.responses <- pkt - s.working.Done() -} - -// shut down packetManager controller -func (s *packetManager) close() { - // pause until current packets are processed - s.working.Wait() - close(s.fini) -} - -// Passed a worker function, returns a channel for incoming packets. -// Keep process packet responses in the order they are received while -// maximizing throughput of file transfers. 
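A sketch of the worker shape that workerChan below expects, written against the package-internal types; the handle function is hypothetical, standing in for the server's real dispatch:

func startWorkers(s *packetManager, handle func(requestPacket) responsePacket) chan orderedRequest {
	runWorker := func(ch chan orderedRequest) {
		go func() {
			for pkt := range ch {
				// Produce a response and hand it back tagged with the
				// request's order id so the controller can resequence it.
				s.readyPacket(s.newOrderedResponse(handle(pkt.requestPacket), pkt.orderId()))
			}
		}()
	}
	return s.workerChan(runWorker)
}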
-func (s *packetManager) workerChan(runWorker func(chan orderedRequest), -) chan orderedRequest { - - // multiple workers for faster read/writes - rwChan := make(chan orderedRequest, SftpServerWorkerCount) - for i := 0; i < SftpServerWorkerCount; i++ { - runWorker(rwChan) - } - - // single worker to enforce sequential processing of everything else - cmdChan := make(chan orderedRequest) - runWorker(cmdChan) - - pktChan := make(chan orderedRequest, SftpServerWorkerCount) - go func() { - for pkt := range pktChan { - switch pkt.requestPacket.(type) { - case *sshFxpReadPacket, *sshFxpWritePacket: - s.incomingPacket(pkt) - rwChan <- pkt - continue - case *sshFxpClosePacket: - // wait for reads/writes to finish when file is closed - // incomingPacket() call must occur after this - s.working.Wait() - } - s.incomingPacket(pkt) - // all non-RW use sequential cmdChan - cmdChan <- pkt - } - close(rwChan) - close(cmdChan) - s.close() - }() - - return pktChan -} - -// process packets -func (s *packetManager) controller() { - for { - select { - case pkt := <-s.requests: - debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderId()) - s.incoming = append(s.incoming, pkt) - s.incoming.Sort() - case pkt := <-s.responses: - debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderId()) - s.outgoing = append(s.outgoing, pkt) - s.outgoing.Sort() - case <-s.fini: - return - } - s.maybeSendPackets() - } -} - -// send as many packets as are ready -func (s *packetManager) maybeSendPackets() { - for { - if len(s.outgoing) == 0 || len(s.incoming) == 0 { - debug("break! -- outgoing: %v; incoming: %v", - len(s.outgoing), len(s.incoming)) - break - } - out := s.outgoing[0] - in := s.incoming[0] - // debug("incoming: %v", ids(s.incoming)) - // debug("outgoing: %v", ids(s.outgoing)) - if in.orderId() == out.orderId() { - debug("Sending packet: %v", out.id()) - s.sender.sendPacket(out.(encoding.BinaryMarshaler)) - // pop off heads - copy(s.incoming, s.incoming[1:]) // shift left - s.incoming[len(s.incoming)-1] = nil // clear last - s.incoming = s.incoming[:len(s.incoming)-1] // remove last - copy(s.outgoing, s.outgoing[1:]) // shift left - s.outgoing[len(s.outgoing)-1] = nil // clear last - s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last - } else { - break - } - } -} - -// func oids(o []orderedPacket) []uint32 { -// res := make([]uint32, 0, len(o)) -// for _, v := range o { -// res = append(res, v.orderId()) -// } -// return res -// } -// func ids(o []orderedPacket) []uint32 { -// res := make([]uint32, 0, len(o)) -// for _, v := range o { -// res = append(res, v.id()) -// } -// return res -// } diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go deleted file mode 100644 index bcff9bf33..000000000 --- a/vendor/github.com/pkg/sftp/packet-typing.go +++ /dev/null @@ -1,134 +0,0 @@ -package sftp - -import ( - "encoding" - - "github.com/pkg/errors" -) - -// all incoming packets -type requestPacket interface { - encoding.BinaryUnmarshaler - id() uint32 -} - -type responsePacket interface { - encoding.BinaryMarshaler - id() uint32 -} - -// interfaces to group types -type hasPath interface { - requestPacket - getPath() string -} - -type hasHandle interface { - requestPacket - getHandle() string -} - -type notReadOnly interface { - notReadOnly() -} - -//// define types by adding methods -// hasPath -func (p sshFxpLstatPacket) getPath() string { return p.Path } -func (p sshFxpStatPacket) getPath() string { return p.Path } -func (p sshFxpRmdirPacket) getPath() string { 
return p.Path } -func (p sshFxpReadlinkPacket) getPath() string { return p.Path } -func (p sshFxpRealpathPacket) getPath() string { return p.Path } -func (p sshFxpMkdirPacket) getPath() string { return p.Path } -func (p sshFxpSetstatPacket) getPath() string { return p.Path } -func (p sshFxpStatvfsPacket) getPath() string { return p.Path } -func (p sshFxpRemovePacket) getPath() string { return p.Filename } -func (p sshFxpRenamePacket) getPath() string { return p.Oldpath } -func (p sshFxpSymlinkPacket) getPath() string { return p.Targetpath } -func (p sshFxpOpendirPacket) getPath() string { return p.Path } -func (p sshFxpOpenPacket) getPath() string { return p.Path } - -func (p sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath } - -// hasHandle -func (p sshFxpFstatPacket) getHandle() string { return p.Handle } -func (p sshFxpFsetstatPacket) getHandle() string { return p.Handle } -func (p sshFxpReadPacket) getHandle() string { return p.Handle } -func (p sshFxpWritePacket) getHandle() string { return p.Handle } -func (p sshFxpReaddirPacket) getHandle() string { return p.Handle } -func (p sshFxpClosePacket) getHandle() string { return p.Handle } - -// notReadOnly -func (p sshFxpWritePacket) notReadOnly() {} -func (p sshFxpSetstatPacket) notReadOnly() {} -func (p sshFxpFsetstatPacket) notReadOnly() {} -func (p sshFxpRemovePacket) notReadOnly() {} -func (p sshFxpMkdirPacket) notReadOnly() {} -func (p sshFxpRmdirPacket) notReadOnly() {} -func (p sshFxpRenamePacket) notReadOnly() {} -func (p sshFxpSymlinkPacket) notReadOnly() {} -func (p sshFxpExtendedPacketPosixRename) notReadOnly() {} - -// some packets with ID are missing id() -func (p sshFxpDataPacket) id() uint32 { return p.ID } -func (p sshFxpStatusPacket) id() uint32 { return p.ID } -func (p sshFxpStatResponse) id() uint32 { return p.ID } -func (p sshFxpNamePacket) id() uint32 { return p.ID } -func (p sshFxpHandlePacket) id() uint32 { return p.ID } -func (p StatVFS) id() uint32 { return p.ID } -func (p sshFxVersionPacket) id() uint32 { return 0 } - -// take raw incoming packet data and build packet objects -func makePacket(p rxPacket) (requestPacket, error) { - var pkt requestPacket - switch p.pktType { - case ssh_FXP_INIT: - pkt = &sshFxInitPacket{} - case ssh_FXP_LSTAT: - pkt = &sshFxpLstatPacket{} - case ssh_FXP_OPEN: - pkt = &sshFxpOpenPacket{} - case ssh_FXP_CLOSE: - pkt = &sshFxpClosePacket{} - case ssh_FXP_READ: - pkt = &sshFxpReadPacket{} - case ssh_FXP_WRITE: - pkt = &sshFxpWritePacket{} - case ssh_FXP_FSTAT: - pkt = &sshFxpFstatPacket{} - case ssh_FXP_SETSTAT: - pkt = &sshFxpSetstatPacket{} - case ssh_FXP_FSETSTAT: - pkt = &sshFxpFsetstatPacket{} - case ssh_FXP_OPENDIR: - pkt = &sshFxpOpendirPacket{} - case ssh_FXP_READDIR: - pkt = &sshFxpReaddirPacket{} - case ssh_FXP_REMOVE: - pkt = &sshFxpRemovePacket{} - case ssh_FXP_MKDIR: - pkt = &sshFxpMkdirPacket{} - case ssh_FXP_RMDIR: - pkt = &sshFxpRmdirPacket{} - case ssh_FXP_REALPATH: - pkt = &sshFxpRealpathPacket{} - case ssh_FXP_STAT: - pkt = &sshFxpStatPacket{} - case ssh_FXP_RENAME: - pkt = &sshFxpRenamePacket{} - case ssh_FXP_READLINK: - pkt = &sshFxpReadlinkPacket{} - case ssh_FXP_SYMLINK: - pkt = &sshFxpSymlinkPacket{} - case ssh_FXP_EXTENDED: - pkt = &sshFxpExtendedPacket{} - default: - return nil, errors.Errorf("unhandled packet type: %s", p.pktType) - } - if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { - // Return partially unpacked packet to allow callers to return - // error messages appropriately with necessary id() method. 
-		return pkt, err
-	}
-	return pkt, nil
-}
diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go
deleted file mode 100644
index e2dec7aea..000000000
--- a/vendor/github.com/pkg/sftp/packet.go
+++ /dev/null
@@ -1,959 +0,0 @@
-package sftp
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-
-	"github.com/pkg/errors"
-)
-
-var (
-	errShortPacket           = errors.New("packet too short")
-	errUnknownExtendedPacket = errors.New("unknown extended packet")
-)
-
-const (
-	debugDumpTxPacket      = false
-	debugDumpRxPacket      = false
-	debugDumpTxPacketBytes = false
-	debugDumpRxPacketBytes = false
-)
-
-func marshalUint32(b []byte, v uint32) []byte {
-	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-func marshalUint64(b []byte, v uint64) []byte {
-	return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
-}
-
-func marshalString(b []byte, v string) []byte {
-	return append(marshalUint32(b, uint32(len(v))), v...)
-}
-
-func marshal(b []byte, v interface{}) []byte {
-	if v == nil {
-		return b
-	}
-	switch v := v.(type) {
-	case uint8:
-		return append(b, v)
-	case uint32:
-		return marshalUint32(b, v)
-	case uint64:
-		return marshalUint64(b, v)
-	case string:
-		return marshalString(b, v)
-	case os.FileInfo:
-		return marshalFileInfo(b, v)
-	default:
-		switch d := reflect.ValueOf(v); d.Kind() {
-		case reflect.Struct:
-			for i, n := 0, d.NumField(); i < n; i++ {
-				b = marshal(b, d.Field(i).Interface())
-			}
-			return b
-		case reflect.Slice:
-			for i, n := 0, d.Len(); i < n; i++ {
-				b = marshal(b, d.Index(i).Interface())
-			}
-			return b
-		default:
-			panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v))
-		}
-	}
-}
-
-func unmarshalUint32(b []byte) (uint32, []byte) {
-	v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
-	return v, b[4:]
-}
-
-func unmarshalUint32Safe(b []byte) (uint32, []byte, error) {
-	var v uint32
-	if len(b) < 4 {
-		return 0, nil, errShortPacket
-	}
-	v, b = unmarshalUint32(b)
-	return v, b, nil
-}
-
-func unmarshalUint64(b []byte) (uint64, []byte) {
-	h, b := unmarshalUint32(b)
-	l, b := unmarshalUint32(b)
-	return uint64(h)<<32 | uint64(l), b
-}
-
-func unmarshalUint64Safe(b []byte) (uint64, []byte, error) {
-	var v uint64
-	if len(b) < 8 {
-		return 0, nil, errShortPacket
-	}
-	v, b = unmarshalUint64(b)
-	return v, b, nil
-}
-
-func unmarshalString(b []byte) (string, []byte) {
-	n, b := unmarshalUint32(b)
-	return string(b[:n]), b[n:]
-}
-
-func unmarshalStringSafe(b []byte) (string, []byte, error) {
-	n, b, err := unmarshalUint32Safe(b)
-	if err != nil {
-		return "", nil, err
-	}
-	if int64(n) > int64(len(b)) {
-		return "", nil, errShortPacket
-	}
-	return string(b[:n]), b[n:], nil
-}
-
-// sendPacket marshals p and writes it out framed as an SFTP packet:
-// a big-endian uint32 length prefix followed by the packet body.
-func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
-	bb, err := m.MarshalBinary()
-	if err != nil {
-		return errors.Errorf("binary marshaller failed: %v", err)
-	}
-	if debugDumpTxPacketBytes {
-		debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:])
-	} else if debugDumpTxPacket {
-		debug("send packet: %s %d bytes", fxp(bb[0]), len(bb))
-	}
-	// Slide packet down 4 bytes to make room for length header.
-	packet := append(bb, make([]byte, 4)...)
// optimistically assume bb has capacity - copy(packet[4:], bb) - binary.BigEndian.PutUint32(packet[:4], uint32(len(bb))) - - _, err = w.Write(packet) - if err != nil { - return errors.Errorf("failed to send packet: %v", err) - } - return nil -} - -func recvPacket(r io.Reader) (uint8, []byte, error) { - var b = []byte{0, 0, 0, 0} - if _, err := io.ReadFull(r, b); err != nil { - return 0, nil, err - } - l, _ := unmarshalUint32(b) - b = make([]byte, l) - if _, err := io.ReadFull(r, b); err != nil { - debug("recv packet %d bytes: err %v", l, err) - return 0, nil, err - } - if debugDumpRxPacketBytes { - debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:]) - } else if debugDumpRxPacket { - debug("recv packet: %s %d bytes", fxp(b[0]), l) - } - return b[0], b[1:], nil -} - -type extensionPair struct { - Name string - Data string -} - -func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { - var ep extensionPair - var err error - ep.Name, b, err = unmarshalStringSafe(b) - if err != nil { - return ep, b, err - } - ep.Data, b, err = unmarshalStringSafe(b) - return ep, b, err -} - -// Here starts the definition of packets along with their MarshalBinary -// implementations. -// Manually writing the marshalling logic wins us a lot of time and -// allocation. - -type sshFxInitPacket struct { - Version uint32 - Extensions []extensionPair -} - -func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 // byte + uint32 - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_INIT) - b = marshalUint32(b, p.Version) - for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - return b, nil -} - -func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { - var err error - if p.Version, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - for len(b) > 0 { - var ep extensionPair - ep, b, err = unmarshalExtensionPair(b) - if err != nil { - return err - } - p.Extensions = append(p.Extensions, ep) - } - return nil -} - -type sshFxVersionPacket struct { - Version uint32 - Extensions []struct { - Name, Data string - } -} - -func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 // byte + uint32 - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_VERSION) - b = marshalUint32(b, p.Version) - for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - return b, nil -} - -func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(str) - - b := make([]byte, 0, l) - b = append(b, packetType) - b = marshalUint32(b, id) - b = marshalString(b, str) - return b, nil -} - -func unmarshalIDString(b []byte, id *uint32, str *string) error { - var err error - *id, b, err = unmarshalUint32Safe(b) - if err != nil { - return err - } - *str, _, err = unmarshalStringSafe(b) - return err -} - -type sshFxpReaddirPacket struct { - ID uint32 - Handle string -} - -func (p sshFxpReaddirPacket) id() uint32 { return p.ID } - -func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) -} - -func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpOpendirPacket struct { - ID uint32 - Path string -} - -func (p sshFxpOpendirPacket) id() uint32 { 
return p.ID } - -func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) -} - -func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpLstatPacket struct { - ID uint32 - Path string -} - -func (p sshFxpLstatPacket) id() uint32 { return p.ID } - -func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) -} - -func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpStatPacket struct { - ID uint32 - Path string -} - -func (p sshFxpStatPacket) id() uint32 { return p.ID } - -func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) -} - -func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpFstatPacket struct { - ID uint32 - Handle string -} - -func (p sshFxpFstatPacket) id() uint32 { return p.ID } - -func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) -} - -func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpClosePacket struct { - ID uint32 - Handle string -} - -func (p sshFxpClosePacket) id() uint32 { return p.ID } - -func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) -} - -func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpRemovePacket struct { - ID uint32 - Filename string -} - -func (p sshFxpRemovePacket) id() uint32 { return p.ID } - -func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) -} - -func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Filename) -} - -type sshFxpRmdirPacket struct { - ID uint32 - Path string -} - -func (p sshFxpRmdirPacket) id() uint32 { return p.ID } - -func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) -} - -func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpSymlinkPacket struct { - ID uint32 - Targetpath string - Linkpath string -} - -func (p sshFxpSymlinkPacket) id() uint32 { return p.ID } - -func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Targetpath) + - 4 + len(p.Linkpath) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_SYMLINK) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Targetpath) - b = marshalString(b, p.Linkpath) - return b, nil -} - -func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpReadlinkPacket struct { - ID uint32 - Path string -} - -func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } - -func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) -} - -func (p *sshFxpReadlinkPacket) UnmarshalBinary(b 
[]byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpRealpathPacket struct { - ID uint32 - Path string -} - -func (p sshFxpRealpathPacket) id() uint32 { return p.ID } - -func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) -} - -func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpNameAttr struct { - Name string - LongName string - Attrs []interface{} -} - -func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) { - b := []byte{} - b = marshalString(b, p.Name) - b = marshalString(b, p.LongName) - for _, attr := range p.Attrs { - b = marshal(b, attr) - } - return b, nil -} - -type sshFxpNamePacket struct { - ID uint32 - NameAttrs []sshFxpNameAttr -} - -func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { - b := []byte{} - b = append(b, ssh_FXP_NAME) - b = marshalUint32(b, p.ID) - b = marshalUint32(b, uint32(len(p.NameAttrs))) - for _, na := range p.NameAttrs { - ab, err := na.MarshalBinary() - if err != nil { - return nil, err - } - - b = append(b, ab...) - } - return b, nil -} - -type sshFxpOpenPacket struct { - ID uint32 - Path string - Pflags uint32 - Flags uint32 // ignored -} - -func (p sshFxpOpenPacket) id() uint32 { return p.ID } - -func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + - 4 + len(p.Path) + - 4 + 4 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_OPEN) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Pflags) - b = marshalUint32(b, p.Flags) - return b, nil -} - -func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpReadPacket struct { - ID uint32 - Handle string - Offset uint64 - Len uint32 -} - -func (p sshFxpReadPacket) id() uint32 { return p.ID } - -func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 8 + 4 // uint64 + uint32 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_READ) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Len) - return b, nil -} - -func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpRenamePacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p sshFxpRenamePacket) id() uint32 { return p.ID } - -func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_RENAME) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - return b, nil -} - -func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); 
err != nil { - return err - } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpPosixRenamePacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p sshFxpPosixRenamePacket) id() uint32 { return p.ID } - -func (p sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { - const ext = "posix-rename@openssh.com" - l := 1 + 4 + // type(byte) + uint32 - 4 + len(ext) + - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_EXTENDED) - b = marshalUint32(b, p.ID) - b = marshalString(b, ext) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - return b, nil -} - -type sshFxpWritePacket struct { - ID uint32 - Handle string - Offset uint64 - Length uint32 - Data []byte -} - -func (p sshFxpWritePacket) id() uint32 { return p.ID } - -func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 8 + 4 + // uint64 + uint32 - len(p.Data) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_WRITE) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Length) - b = append(b, p.Data...) - return b, nil -} - -func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return errShortPacket - } - - p.Data = append([]byte{}, b[:p.Length]...) 
- return nil -} - -type sshFxpMkdirPacket struct { - ID uint32 - Path string - Flags uint32 // ignored -} - -func (p sshFxpMkdirPacket) id() uint32 { return p.ID } - -func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Path) + - 4 // uint32 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_MKDIR) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - return b, nil -} - -func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpSetstatPacket struct { - ID uint32 - Path string - Flags uint32 - Attrs interface{} -} - -type sshFxpFsetstatPacket struct { - ID uint32 - Handle string - Flags uint32 - Attrs interface{} -} - -func (p sshFxpSetstatPacket) id() uint32 { return p.ID } -func (p sshFxpFsetstatPacket) id() uint32 { return p.ID } - -func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Path) + - 4 // uint32 + uint64 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_SETSTAT) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - b = marshal(b, p.Attrs) - return b, nil -} - -func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 4 // uint32 + uint64 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_FSETSTAT) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint32(b, p.Flags) - b = marshal(b, p.Attrs) - return b, nil -} - -func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -type sshFxpHandlePacket struct { - ID uint32 - Handle string -} - -func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_HANDLE} - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - return b, nil -} - -type sshFxpStatusPacket struct { - ID uint32 - StatusError -} - -func (p sshFxpStatusPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_STATUS} - b = marshalUint32(b, p.ID) - b = marshalStatus(b, p.StatusError) - return b, nil -} - -type sshFxpDataPacket struct { - ID uint32 - Length uint32 - Data []byte -} - -func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_DATA} - b = marshalUint32(b, p.ID) - b = marshalUint32(b, p.Length) - b = append(b, p.Data[:p.Length]...) 
- return b, nil -} - -func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return errors.New("truncated packet") - } - - p.Data = make([]byte, p.Length) - copy(p.Data, b) - return nil -} - -type sshFxpStatvfsPacket struct { - ID uint32 - Path string -} - -func (p sshFxpStatvfsPacket) id() uint32 { return p.ID } - -func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - len(p.Path) + - len("statvfs@openssh.com") - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_EXTENDED) - b = marshalUint32(b, p.ID) - b = marshalString(b, "statvfs@openssh.com") - b = marshalString(b, p.Path) - return b, nil -} - -// A StatVFS contains statistics about a filesystem. -type StatVFS struct { - ID uint32 - Bsize uint64 /* file system block size */ - Frsize uint64 /* fundamental fs block size */ - Blocks uint64 /* number of blocks (unit f_frsize) */ - Bfree uint64 /* free blocks in file system */ - Bavail uint64 /* free blocks for non-root */ - Files uint64 /* total file inodes */ - Ffree uint64 /* free file inodes */ - Favail uint64 /* free file inodes for to non-root */ - Fsid uint64 /* file system id */ - Flag uint64 /* bit mask of f_flag values */ - Namemax uint64 /* maximum filename length */ -} - -// TotalSpace calculates the amount of total space in a filesystem. -func (p *StatVFS) TotalSpace() uint64 { - return p.Frsize * p.Blocks -} - -// FreeSpace calculates the amount of free space in a filesystem. -func (p *StatVFS) FreeSpace() uint64 { - return p.Frsize * p.Bfree -} - -// Convert to ssh_FXP_EXTENDED_REPLY packet binary format -func (p *StatVFS) MarshalBinary() ([]byte, error) { - var buf bytes.Buffer - buf.Write([]byte{ssh_FXP_EXTENDED_REPLY}) - err := binary.Write(&buf, binary.BigEndian, p) - return buf.Bytes(), err -} - -type sshFxpExtendedPacket struct { - ID uint32 - ExtendedRequest string - SpecificPacket interface { - serverRespondablePacket - readonly() bool - } -} - -func (p sshFxpExtendedPacket) id() uint32 { return p.ID } -func (p sshFxpExtendedPacket) readonly() bool { - if p.SpecificPacket == nil { - return true - } - return p.SpecificPacket.readonly() -} - -func (p sshFxpExtendedPacket) respond(svr *Server) responsePacket { - if p.SpecificPacket == nil { - return statusFromError(p, nil) - } - return p.SpecificPacket.respond(svr) -} - -func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { - var err error - bOrig := b - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { - return err - } - - // specific unmarshalling - switch p.ExtendedRequest { - case "statvfs@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} - case "posix-rename@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} - default: - return errors.Wrapf(errUnknownExtendedPacket, "packet type %v", p.SpecificPacket) - } - - return p.SpecificPacket.UnmarshalBinary(bOrig) -} - -type sshFxpExtendedPacketStatVFS struct { - ID uint32 - ExtendedRequest string - Path string -} - -func (p sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } -func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true } -func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = 
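TotalSpace and FreeSpace above are plain products of the fragment size with the block counts. A worked sketch with hypothetical numbers:

package main

import "fmt"

// statVFS mirrors just the StatVFS fields the space helpers use.
type statVFS struct {
	Frsize uint64 // fundamental filesystem block size
	Blocks uint64 // total blocks (in units of Frsize)
	Bfree  uint64 // free blocks
}

func main() {
	// Hypothetical filesystem: 4 KiB fragments, 2^20 blocks, 2^18 free.
	s := statVFS{Frsize: 4096, Blocks: 1 << 20, Bfree: 1 << 18}
	total := s.Frsize * s.Blocks // TotalSpace()
	free := s.Frsize * s.Bfree   // FreeSpace()
	fmt.Printf("total=%d GiB free=%d GiB\n", total>>30, free>>30) // total=4 GiB free=1 GiB
}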
unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpExtendedPacketPosixRename struct { - ID uint32 - ExtendedRequest string - Oldpath string - Newpath string -} - -func (p sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } -func (p sshFxpExtendedPacketPosixRename) readonly() bool { return false } -func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -func (p sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { - err := os.Rename(p.Oldpath, p.Newpath) - return statusFromError(p, err) -} diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go deleted file mode 100644 index b695528fd..000000000 --- a/vendor/github.com/pkg/sftp/release.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !debug - -package sftp - -func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go deleted file mode 100644 index 31b1eaa66..000000000 --- a/vendor/github.com/pkg/sftp/request-attrs.go +++ /dev/null @@ -1,63 +0,0 @@ -package sftp - -// Methods on the Request object to make working with the Flags bitmasks and -// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write -// request and AttrFlags() and Attributes() when working with SetStat requests. -import "os" - -// File Open and Write Flags. Correlate directly with with os.OpenFile flags -// (https://golang.org/pkg/os/#pkg-constants). -type FileOpenFlags struct { - Read, Write, Append, Creat, Trunc, Excl bool -} - -func newFileOpenFlags(flags uint32) FileOpenFlags { - return FileOpenFlags{ - Read: flags&ssh_FXF_READ != 0, - Write: flags&ssh_FXF_WRITE != 0, - Append: flags&ssh_FXF_APPEND != 0, - Creat: flags&ssh_FXF_CREAT != 0, - Trunc: flags&ssh_FXF_TRUNC != 0, - Excl: flags&ssh_FXF_EXCL != 0, - } -} - -// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values, -// into a FileOpenFlags struct with booleans set for flags set in bitmap. -func (r *Request) Pflags() FileOpenFlags { - return newFileOpenFlags(r.Flags) -} - -// Flags that indicate whether SFTP file attributes were passed. When a flag is -// true the corresponding attribute should be available from the FileStat -// object returned by Attributes method. Used with SetStat. -type FileAttrFlags struct { - Size, UidGid, Permissions, Acmodtime bool -} - -func newFileAttrFlags(flags uint32) FileAttrFlags { - return FileAttrFlags{ - Size: (flags & ssh_FILEXFER_ATTR_SIZE) != 0, - UidGid: (flags & ssh_FILEXFER_ATTR_UIDGID) != 0, - Permissions: (flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0, - Acmodtime: (flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0, - } -} - -// FileAttrFlags returns a FileAttrFlags boolean struct based on the -// bitmap/uint32 file attribute flags from the SFTP packaet. 
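newFileOpenFlags above is a straight bitmask-to-booleans decode. A standalone sketch using the draft-02 pflag values (constant names here are illustrative):

package main

import "fmt"

// Draft-02 pflag bits, as used by the ssh_FXF_* constants.
const (
	fxfRead   = 0x00000001
	fxfWrite  = 0x00000002
	fxfAppend = 0x00000004
	fxfCreat  = 0x00000008
	fxfTrunc  = 0x00000010
	fxfExcl   = 0x00000020
)

// openFlags mirrors FileOpenFlags: one boolean per pflag bit.
type openFlags struct {
	Read, Write, Append, Creat, Trunc, Excl bool
}

func decodePflags(flags uint32) openFlags {
	return openFlags{
		Read:   flags&fxfRead != 0,
		Write:  flags&fxfWrite != 0,
		Append: flags&fxfAppend != 0,
		Creat:  flags&fxfCreat != 0,
		Trunc:  flags&fxfTrunc != 0,
		Excl:   flags&fxfExcl != 0,
	}
}

func main() {
	// A typical "create or truncate for writing" open request.
	fmt.Printf("%+v\n", decodePflags(fxfWrite|fxfCreat|fxfTrunc))
}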
-func (r *Request) AttrFlags() FileAttrFlags { - return newFileAttrFlags(r.Flags) -} - -// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode -func (a FileStat) FileMode() os.FileMode { - return os.FileMode(a.Mode) -} - -// Attributres parses file attributes byte blob and return them in a -// FileStat object. -func (r *Request) Attributes() *FileStat { - fs, _ := getFileStat(r.Flags, r.Attrs) - return fs -} diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go deleted file mode 100644 index 00451e74d..000000000 --- a/vendor/github.com/pkg/sftp/request-errors.go +++ /dev/null @@ -1,42 +0,0 @@ -package sftp - -// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more -// direct control of the errors being sent vs. letting the library work them -// out from the standard os/io errors. - -type fxerr uint32 - -const ( - ErrSshFxOk = fxerr(ssh_FX_OK) - ErrSshFxEof = fxerr(ssh_FX_EOF) - ErrSshFxNoSuchFile = fxerr(ssh_FX_NO_SUCH_FILE) - ErrSshFxPermissionDenied = fxerr(ssh_FX_PERMISSION_DENIED) - ErrSshFxFailure = fxerr(ssh_FX_FAILURE) - ErrSshFxBadMessage = fxerr(ssh_FX_BAD_MESSAGE) - ErrSshFxNoConnection = fxerr(ssh_FX_NO_CONNECTION) - ErrSshFxConnectionLost = fxerr(ssh_FX_CONNECTION_LOST) - ErrSshFxOpUnsupported = fxerr(ssh_FX_OP_UNSUPPORTED) -) - -func (e fxerr) Error() string { - switch e { - case ErrSshFxOk: - return "OK" - case ErrSshFxEof: - return "EOF" - case ErrSshFxNoSuchFile: - return "No Such File" - case ErrSshFxPermissionDenied: - return "Permission Denied" - case ErrSshFxBadMessage: - return "Bad Message" - case ErrSshFxNoConnection: - return "No Connection" - case ErrSshFxConnectionLost: - return "Connection Lost" - case ErrSshFxOpUnsupported: - return "Operation Unsupported" - default: - return "Failure" - } -} diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go deleted file mode 100644 index e5abd1842..000000000 --- a/vendor/github.com/pkg/sftp/request-example.go +++ /dev/null @@ -1,267 +0,0 @@ -package sftp - -// This serves as an example of how to implement the request server handler as -// well as a dummy backend for testing. It implements an in-memory backend that -// works as a very simple filesystem with simple flat key-value lookup system. - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "sync" - "syscall" - "time" -) - -// InMemHandler returns a Hanlders object with the test handlers. 
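Because fxerr implements error, a handler can return one of these values directly to control the exact SSH_FXP_STATUS code sent to the client. A minimal sketch, assuming this vendored package's exported API (FileCmder, Request, ErrSshFxPermissionDenied):

package main

import (
	"fmt"

	"github.com/pkg/sftp"
)

// readOnlyCmder rejects mutating commands with an explicit SFTP status
// code rather than relying on the library to map an os error.
type readOnlyCmder struct{}

func (readOnlyCmder) Filecmd(r *sftp.Request) error {
	switch r.Method {
	case "Remove", "Rmdir", "Rename", "Mkdir", "Symlink", "Setstat":
		return sftp.ErrSshFxPermissionDenied // sent as SSH_FX_PERMISSION_DENIED
	}
	return nil
}

func main() {
	var c sftp.FileCmder = readOnlyCmder{}
	err := c.Filecmd(&sftp.Request{Method: "Remove", Filepath: "/etc/passwd"})
	fmt.Println(err) // Permission Denied
}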
-func InMemHandler() Handlers { - root := &root{ - files: make(map[string]*memFile), - } - root.memFile = newMemFile("/", true) - return Handlers{root, root, root, root} -} - -// Example Handlers -func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - fs.filesLock.Lock() - defer fs.filesLock.Unlock() - file, err := fs.fetch(r.Filepath) - if err != nil { - return nil, err - } - if file.symlink != "" { - file, err = fs.fetch(file.symlink) - if err != nil { - return nil, err - } - } - return file.ReaderAt() -} - -func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - fs.filesLock.Lock() - defer fs.filesLock.Unlock() - file, err := fs.fetch(r.Filepath) - if err == os.ErrNotExist { - dir, err := fs.fetch(filepath.Dir(r.Filepath)) - if err != nil { - return nil, err - } - if !dir.isdir { - return nil, os.ErrInvalid - } - file = newMemFile(r.Filepath, false) - fs.files[r.Filepath] = file - } - return file.WriterAt() -} - -func (fs *root) Filecmd(r *Request) error { - if fs.mockErr != nil { - return fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - fs.filesLock.Lock() - defer fs.filesLock.Unlock() - switch r.Method { - case "Setstat": - return nil - case "Rename": - file, err := fs.fetch(r.Filepath) - if err != nil { - return err - } - if _, ok := fs.files[r.Target]; ok { - return &os.LinkError{Op: "rename", Old: r.Filepath, New: r.Target, - Err: fmt.Errorf("dest file exists")} - } - file.name = r.Target - fs.files[r.Target] = file - delete(fs.files, r.Filepath) - case "Rmdir", "Remove": - _, err := fs.fetch(filepath.Dir(r.Filepath)) - if err != nil { - return err - } - delete(fs.files, r.Filepath) - case "Mkdir": - _, err := fs.fetch(filepath.Dir(r.Filepath)) - if err != nil { - return err - } - fs.files[r.Filepath] = newMemFile(r.Filepath, true) - case "Symlink": - _, err := fs.fetch(r.Filepath) - if err != nil { - return err - } - link := newMemFile(r.Target, false) - link.symlink = r.Filepath - fs.files[r.Target] = link - } - return nil -} - -type listerat []os.FileInfo - -// Modeled after strings.Reader's ReadAt() implementation -func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) { - var n int - if offset >= int64(len(f)) { - return 0, io.EOF - } - n = copy(ls, f[offset:]) - if n < len(ls) { - return n, io.EOF - } - return n, nil -} - -func (fs *root) Filelist(r *Request) (ListerAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - fs.filesLock.Lock() - defer fs.filesLock.Unlock() - - file, err := fs.fetch(r.Filepath) - if err != nil { - return nil, err - } - - switch r.Method { - case "List": - if !file.IsDir() { - return nil, syscall.ENOTDIR - } - ordered_names := []string{} - for fn, _ := range fs.files { - if filepath.Dir(fn) == r.Filepath { - ordered_names = append(ordered_names, fn) - } - } - sort.Strings(ordered_names) - list := make([]os.FileInfo, len(ordered_names)) - for i, fn := range ordered_names { - list[i] = fs.files[fn] - } - return listerat(list), nil - case "Stat": - return listerat([]os.FileInfo{file}), nil - case "Readlink": - if file.symlink != "" { - file, err = fs.fetch(file.symlink) - if err != nil { - return nil, err - } - } - return 
listerat([]os.FileInfo{file}), nil - } - return nil, nil -} - -// In memory file-system-y thing that the Hanlders live on -type root struct { - *memFile - files map[string]*memFile - filesLock sync.Mutex - mockErr error -} - -// Set a mocked error that the next handler call will return. -// Set to nil to reset for no error. -func (fs *root) returnErr(err error) { - fs.mockErr = err -} - -func (fs *root) fetch(path string) (*memFile, error) { - if path == "/" { - return fs.memFile, nil - } - if file, ok := fs.files[path]; ok { - return file, nil - } - return nil, os.ErrNotExist -} - -// Implements os.FileInfo, Reader and Writer interfaces. -// These are the 3 interfaces necessary for the Handlers. -type memFile struct { - name string - modtime time.Time - symlink string - isdir bool - content []byte - contentLock sync.RWMutex -} - -// factory to make sure modtime is set -func newMemFile(name string, isdir bool) *memFile { - return &memFile{ - name: name, - modtime: time.Now(), - isdir: isdir, - } -} - -// Have memFile fulfill os.FileInfo interface -func (f *memFile) Name() string { return filepath.Base(f.name) } -func (f *memFile) Size() int64 { return int64(len(f.content)) } -func (f *memFile) Mode() os.FileMode { - ret := os.FileMode(0644) - if f.isdir { - ret = os.FileMode(0755) | os.ModeDir - } - if f.symlink != "" { - ret = os.FileMode(0777) | os.ModeSymlink - } - return ret -} -func (f *memFile) ModTime() time.Time { return f.modtime } -func (f *memFile) IsDir() bool { return f.isdir } -func (f *memFile) Sys() interface{} { - return fakeFileInfoSys() -} - -// Read/Write -func (f *memFile) ReaderAt() (io.ReaderAt, error) { - if f.isdir { - return nil, os.ErrInvalid - } - return bytes.NewReader(f.content), nil -} - -func (f *memFile) WriterAt() (io.WriterAt, error) { - if f.isdir { - return nil, os.ErrInvalid - } - return f, nil -} -func (f *memFile) WriteAt(p []byte, off int64) (int, error) { - // fmt.Println(string(p), off) - // mimic write delays, should be optional - time.Sleep(time.Microsecond * time.Duration(len(p))) - f.contentLock.Lock() - defer f.contentLock.Unlock() - plen := len(p) + int(off) - if plen >= len(f.content) { - nc := make([]byte, plen) - copy(nc, f.content) - f.content = nc - } - copy(f.content[off:], p) - return len(p), nil -} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go deleted file mode 100644 index f69cedc5c..000000000 --- a/vendor/github.com/pkg/sftp/request-interfaces.go +++ /dev/null @@ -1,55 +0,0 @@ -package sftp - -import ( - "io" - "os" -) - -// Interfaces are differentiated based on required returned values. -// All input arguments are to be pulled from Request (the only arg). - -// The Handler interfaces all take the Request object as its only argument. -// All the data you should need to handle the call are in the Request object. -// The request.Method attribute is initially the most important one as it -// determines which Handler gets called. - -// FileReader should return an io.ReaderAt for the filepath -// Note in cases of an error, the error text will be sent to the client. -// Called for Methods: Get -type FileReader interface { - Fileread(*Request) (io.ReaderAt, error) -} - -// FileWriter should return an io.WriterAt for the filepath. -// -// The request server code will call Close() on the returned io.WriterAt -// ojbect if an io.Closer type assertion succeeds. -// Note in cases of an error, the error text will be sent to the client. 
-// Called for Methods: Put, Open -type FileWriter interface { - Filewrite(*Request) (io.WriterAt, error) -} - -// FileCmder should return an error -// Note in cases of an error, the error text will be sent to the client. -// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Symlink, Remove -type FileCmder interface { - Filecmd(*Request) error -} - -// FileLister should return an object that fulfils the ListerAt interface -// Note in cases of an error, the error text will be sent to the client. -// Called for Methods: List, Stat, Readlink -type FileLister interface { - Filelist(*Request) (ListerAt, error) -} - -// ListerAt does for file lists what io.ReaderAt does for files. -// ListAt should return the number of entries copied and an io.EOF -// error if at end of list. This is testable by comparing how many you -// copied to how many could be copied (eg. n < len(ls) below). -// The copy() builtin is best for the copying. -// Note in cases of an error, the error text will be sent to the client. -type ListerAt interface { - ListAt([]os.FileInfo, int64) (int, error) -} diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md deleted file mode 100644 index f887274dc..000000000 --- a/vendor/github.com/pkg/sftp/request-readme.md +++ /dev/null @@ -1,53 +0,0 @@ -# Request Based SFTP API - -The request based API allows for custom backends in a way similar to the http -package. In order to create a backend you need to implement 4 handler -interfaces; one for reading, one for writing, one for misc commands and one for -listing files. Each has 1 required method and in each case those methods take -the Request as the only parameter and they each return something different. -These 4 interfaces are enough to handle all the SFTP traffic in a simplified -manner. - -The Request structure has 5 public fields which you will deal with. - -- Method (string) - string name of incoming call -- Filepath (string) - POSIX path of file to act on -- Flags (uint32) - 32bit bitmask value of file open/create flags -- Attrs ([]byte) - byte string of file attribute data -- Target (string) - target path for renames and sym-links - -Below are the methods and a brief description of what they need to do. - -### Fileread(*Request) (io.Reader, error) - -Handler for "Get" method and returns an io.Reader for the file which the server -then sends to the client. - -### Filewrite(*Request) (io.Writer, error) - -Handler for "Put" method and returns an io.Writer for the file which the server -then writes the uploaded file to. The file opening "pflags" are currently -preserved in the Request.Flags field as a 32bit bitmask value. See the [SFTP -spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for -details. - -### Filecmd(*Request) error - -Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods. Makes the -appropriate changes and returns nil for success or an filesystem like error -(eg. os.ErrNotExist). The attributes are currently propagated in their raw form -([]byte) and will need to be unmarshalled to be useful. See the respond method -on sshFxpSetstatPacket for example of you might want to do this. - -### Fileinfo(*Request) ([]os.FileInfo, error) - -Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs -with the data on the files and returns in a list (list of 1 for Stat and -Readlink). - - -## TODO - -- Add support for API users to see trace/debugging info of what is going on -inside SFTP server. 
-- Unmarshal the file attributes into a structure on the Request object. diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go deleted file mode 100644 index 2e99720a9..000000000 --- a/vendor/github.com/pkg/sftp/request-server.go +++ /dev/null @@ -1,219 +0,0 @@ -package sftp - -import ( - "context" - "io" - "path" - "path/filepath" - "strconv" - "sync" - "syscall" - - "github.com/pkg/errors" -) - -var maxTxPacket uint32 = 1 << 15 - -// Handlers contains the 4 SFTP server request handlers. -type Handlers struct { - FileGet FileReader - FilePut FileWriter - FileCmd FileCmder - FileList FileLister -} - -// RequestServer abstracts the sftp protocol with an http request-like protocol -type RequestServer struct { - *serverConn - Handlers Handlers - pktMgr *packetManager - openRequests map[string]*Request - openRequestLock sync.RWMutex - handleCount int -} - -// NewRequestServer creates/allocates/returns new RequestServer. -// Normally there there will be one server per user-session. -func NewRequestServer(rwc io.ReadWriteCloser, h Handlers) *RequestServer { - svrConn := &serverConn{ - conn: conn{ - Reader: rwc, - WriteCloser: rwc, - }, - } - return &RequestServer{ - serverConn: svrConn, - Handlers: h, - pktMgr: newPktMgr(svrConn), - openRequests: make(map[string]*Request), - } -} - -// New Open packet/Request -func (rs *RequestServer) nextRequest(r *Request) string { - rs.openRequestLock.Lock() - defer rs.openRequestLock.Unlock() - rs.handleCount++ - handle := strconv.Itoa(rs.handleCount) - r.handle = handle - rs.openRequests[handle] = r - return handle -} - -// Returns Request from openRequests, bool is false if it is missing. -// -// The Requests in openRequests work essentially as open file descriptors that -// you can do different things with. What you are doing with it are denoted by -// the first packet of that type (read/write/etc). 
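A minimal sketch of how a RequestServer is typically wired up, using the in-memory example backend from request-example.go; `channel` would usually be an ssh.Channel for the "sftp" subsystem, but any io.ReadWriteCloser works:

package main

import (
	"io"

	"github.com/pkg/sftp"
)

// serveSession runs one SFTP session over the given stream.
func serveSession(channel io.ReadWriteCloser) error {
	server := sftp.NewRequestServer(channel, sftp.InMemHandler())
	defer server.Close()
	if err := server.Serve(); err != io.EOF {
		return err
	}
	return nil // io.EOF: client closed the session normally
}

func main() {} // wiring of the SSH side is elided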
-func (rs *RequestServer) getRequest(handle string) (*Request, bool) { - rs.openRequestLock.RLock() - defer rs.openRequestLock.RUnlock() - r, ok := rs.openRequests[handle] - return r, ok -} - -// Close the Request and clear from openRequests map -func (rs *RequestServer) closeRequest(handle string) error { - rs.openRequestLock.Lock() - defer rs.openRequestLock.Unlock() - if r, ok := rs.openRequests[handle]; ok { - delete(rs.openRequests, handle) - return r.close() - } - return syscall.EBADF -} - -// Close the read/write/closer to trigger exiting the main server loop -func (rs *RequestServer) Close() error { return rs.conn.Close() } - -// Serve requests for user session -func (rs *RequestServer) Serve() error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - var wg sync.WaitGroup - runWorker := func(ch chan orderedRequest) { - wg.Add(1) - go func() { - defer wg.Done() - if err := rs.packetWorker(ctx, ch); err != nil { - rs.conn.Close() // shuts down recvPacket - } - }() - } - pktChan := rs.pktMgr.workerChan(runWorker) - - var err error - var pkt requestPacket - var pktType uint8 - var pktBytes []byte - for { - pktType, pktBytes, err = rs.recvPacket() - if err != nil { - break - } - - pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) - if err != nil { - switch errors.Cause(err) { - case errUnknownExtendedPacket: - if err := rs.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { - debug("failed to send err packet: %v", err) - rs.conn.Close() // shuts down recvPacket - break - } - default: - debug("makePacket err: %v", err) - rs.conn.Close() // shuts down recvPacket - break - } - } - - pktChan <- rs.pktMgr.newOrderedRequest(pkt) - } - - close(pktChan) // shuts down sftpServerWorkers - wg.Wait() // wait for all workers to exit - - // make sure all open requests are properly closed - // (eg. possible on dropped connections, client crashes, etc.) 
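nextRequest, getRequest and closeRequest above amount to a counter-keyed handle table where unknown handles map to EBADF. A standalone sketch of that bookkeeping:

package main

import (
	"fmt"
	"strconv"
	"sync"
	"syscall"
)

// handleTable hands out stringified-counter handles and returns EBADF
// for unknown ones, matching the openRequests map above. The string
// value stands in for the *Request being tracked.
type handleTable struct {
	mu    sync.Mutex
	count int
	open  map[string]string // handle -> filepath
}

func (t *handleTable) next(path string) string {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.count++
	h := strconv.Itoa(t.count)
	t.open[h] = path
	return h
}

func (t *handleTable) close(h string) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.open[h]; ok {
		delete(t.open, h)
		return nil
	}
	return syscall.EBADF
}

func main() {
	t := &handleTable{open: make(map[string]string)}
	h := t.next("/tmp/a.txt")
	fmt.Println(h, t.close(h), t.close("999")) // 1 <nil> bad file descriptor
}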
- for handle, req := range rs.openRequests { - delete(rs.openRequests, handle) - req.close() - } - - return err -} - -func (rs *RequestServer) packetWorker( - ctx context.Context, pktChan chan orderedRequest, -) error { - for pkt := range pktChan { - var rpkt responsePacket - switch pkt := pkt.requestPacket.(type) { - case *sshFxInitPacket: - rpkt = sshFxVersionPacket{Version: sftpProtocolVersion} - case *sshFxpClosePacket: - handle := pkt.getHandle() - rpkt = statusFromError(pkt, rs.closeRequest(handle)) - case *sshFxpRealpathPacket: - rpkt = cleanPacketPath(pkt) - case *sshFxpOpendirPacket: - request := requestFromPacket(ctx, pkt) - rs.nextRequest(request) - rpkt = request.opendir(rs.Handlers, pkt) - case *sshFxpOpenPacket: - request := requestFromPacket(ctx, pkt) - rs.nextRequest(request) - rpkt = request.open(rs.Handlers, pkt) - case *sshFxpFstatPacket: - handle := pkt.getHandle() - request, ok := rs.getRequest(handle) - if !ok { - rpkt = statusFromError(pkt, syscall.EBADF) - } else { - request = NewRequest("Stat", request.Filepath) - rpkt = request.call(rs.Handlers, pkt) - } - case hasHandle: - handle := pkt.getHandle() - request, ok := rs.getRequest(handle) - if !ok { - rpkt = statusFromError(pkt, syscall.EBADF) - } else { - rpkt = request.call(rs.Handlers, pkt) - } - case hasPath: - request := requestFromPacket(ctx, pkt) - rpkt = request.call(rs.Handlers, pkt) - request.close() - default: - return errors.Errorf("unexpected packet type %T", pkt) - } - - rs.pktMgr.readyPacket( - rs.pktMgr.newOrderedResponse(rpkt, pkt.orderId())) - } - return nil -} - -// clean and return name packet for file -func cleanPacketPath(pkt *sshFxpRealpathPacket) responsePacket { - path := cleanPath(pkt.getPath()) - return &sshFxpNamePacket{ - ID: pkt.id(), - NameAttrs: []sshFxpNameAttr{{ - Name: path, - LongName: path, - Attrs: emptyFileStat, - }}, - } -} - -// Makes sure we have a clean POSIX (/) absolute path to work with -func cleanPath(p string) string { - p = filepath.ToSlash(p) - if !filepath.IsAbs(p) { - p = "/" + p - } - return path.Clean(p) -} diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go deleted file mode 100644 index a71a8980a..000000000 --- a/vendor/github.com/pkg/sftp/request-unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !windows - -package sftp - -import ( - "errors" - "syscall" -) - -func fakeFileInfoSys() interface{} { - return &syscall.Stat_t{Uid: 65534, Gid: 65534} -} - -func testOsSys(sys interface{}) error { - fstat := sys.(*FileStat) - if fstat.UID != uint32(65534) { - return errors.New("Uid failed to match.") - } - if fstat.GID != uint32(65534) { - return errors.New("Gid failed to match:") - } - return nil -} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go deleted file mode 100644 index e694e5f1b..000000000 --- a/vendor/github.com/pkg/sftp/request.go +++ /dev/null @@ -1,383 +0,0 @@ -package sftp - -import ( - "context" - "io" - "os" - "path" - "path/filepath" - "sync" - "syscall" - - "github.com/pkg/errors" -) - -// MaxFilelist is the max number of files to return in a readdir batch. -var MaxFilelist int64 = 100 - -// Request contains the data and state for the incoming service request. 
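cleanPath above normalizes whatever the client sent into a clean absolute POSIX path. A standalone sketch of the same three steps (behaviour shown for a POSIX host; filepath.IsAbs differs on Windows):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

// cleanPosixPath mirrors cleanPath: normalize separators, force an
// absolute path, then apply POSIX path.Clean.
func cleanPosixPath(p string) string {
	p = filepath.ToSlash(p)
	if !filepath.IsAbs(p) {
		p = "/" + p
	}
	return path.Clean(p)
}

func main() {
	fmt.Println(cleanPosixPath("foo/../bar//baz")) // /bar/baz
	fmt.Println(cleanPosixPath("/a/./b"))          // /a/b
}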
-type Request struct { - // Get, Put, Setstat, Stat, Rename, Remove - // Rmdir, Mkdir, List, Readlink, Symlink - Method string - Filepath string - Flags uint32 - Attrs []byte // convert to sub-struct - Target string // for renames and sym-links - handle string - // reader/writer/readdir from handlers - state state - // context lasts duration of request - ctx context.Context - cancelCtx context.CancelFunc -} - -type state struct { - *sync.RWMutex - writerAt io.WriterAt - readerAt io.ReaderAt - listerAt ListerAt - lsoffset int64 -} - -// New Request initialized based on packet data -func requestFromPacket(ctx context.Context, pkt hasPath) *Request { - method := requestMethod(pkt) - request := NewRequest(method, pkt.getPath()) - request.ctx, request.cancelCtx = context.WithCancel(ctx) - - switch p := pkt.(type) { - case *sshFxpOpenPacket: - request.Flags = p.Pflags - case *sshFxpSetstatPacket: - request.Flags = p.Flags - request.Attrs = p.Attrs.([]byte) - case *sshFxpRenamePacket: - request.Target = cleanPath(p.Newpath) - case *sshFxpSymlinkPacket: - request.Target = cleanPath(p.Linkpath) - } - return request -} - -// NewRequest creates a new Request object. -func NewRequest(method, path string) *Request { - return &Request{Method: method, Filepath: cleanPath(path), - state: state{RWMutex: new(sync.RWMutex)}} -} - -// shallow copy of existing request -func (r *Request) copy() *Request { - r.state.Lock() - defer r.state.Unlock() - r2 := new(Request) - *r2 = *r - return r2 -} - -// Context returns the request's context. To change the context, -// use WithContext. -// -// The returned context is always non-nil; it defaults to the -// background context. -// -// For incoming server requests, the context is canceled when the -// request is complete or the client's connection closes. -func (r *Request) Context() context.Context { - if r.ctx != nil { - return r.ctx - } - return context.Background() -} - -// WithContext returns a copy of r with its context changed to ctx. -// The provided ctx must be non-nil. 
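The Context/WithContext pair mirrors net/http's API: WithContext shallow-copies the request with a caller-supplied context, which the server cancels when the request completes. A sketch of attaching a deadline (the request and timeout values are hypothetical):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/sftp"
)

func main() {
	r := sftp.NewRequest("Get", "/tmp/data.bin")

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	r = r.WithContext(ctx) // shallow copy carrying the new context

	// Backend work would watch r.Context() to abort early.
	select {
	case <-r.Context().Done():
		fmt.Println("request cancelled:", r.Context().Err())
	case <-time.After(100 * time.Millisecond):
		fmt.Println("backend finished first")
	}
	// The 50ms deadline fires first, so the cancellation branch prints.
}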
-func (r *Request) WithContext(ctx context.Context) *Request { - if ctx == nil { - panic("nil context") - } - r2 := r.copy() - r2.ctx = ctx - r2.cancelCtx = nil - return r2 -} - -// Returns current offset for file list -func (r *Request) lsNext() int64 { - r.state.RLock() - defer r.state.RUnlock() - return r.state.lsoffset -} - -// Increases next offset -func (r *Request) lsInc(offset int64) { - r.state.Lock() - defer r.state.Unlock() - r.state.lsoffset = r.state.lsoffset + offset -} - -// manage file read/write state -func (r *Request) setListerState(la ListerAt) { - r.state.Lock() - defer r.state.Unlock() - r.state.listerAt = la -} - -func (r *Request) getLister() ListerAt { - r.state.RLock() - defer r.state.RUnlock() - return r.state.listerAt -} - -// Close reader/writer if possible -func (r *Request) close() error { - defer func() { - if r.cancelCtx != nil { - r.cancelCtx() - } - }() - r.state.RLock() - rd := r.state.readerAt - r.state.RUnlock() - if c, ok := rd.(io.Closer); ok { - return c.Close() - } - r.state.RLock() - wt := r.state.writerAt - r.state.RUnlock() - if c, ok := wt.(io.Closer); ok { - return c.Close() - } - return nil -} - -// called from worker to handle packet/request -func (r *Request) call(handlers Handlers, pkt requestPacket) responsePacket { - switch r.Method { - case "Get": - return fileget(handlers.FileGet, r, pkt) - case "Put": - return fileput(handlers.FilePut, r, pkt) - case "Setstat", "Rename", "Rmdir", "Mkdir", "Symlink", "Remove": - return filecmd(handlers.FileCmd, r, pkt) - case "List": - return filelist(handlers.FileList, r, pkt) - case "Stat", "Readlink": - return filestat(handlers.FileList, r, pkt) - default: - return statusFromError(pkt, - errors.Errorf("unexpected method: %s", r.Method)) - } -} - -// Additional initialization for Open packets -func (r *Request) open(h Handlers, pkt requestPacket) responsePacket { - flags := r.Pflags() - var err error - switch { - case flags.Write, flags.Append, flags.Creat, flags.Trunc: - r.Method = "Put" - r.state.writerAt, err = h.FilePut.Filewrite(r) - case flags.Read: - r.Method = "Get" - r.state.readerAt, err = h.FileGet.Fileread(r) - default: - return statusFromError(pkt, errors.New("bad file flags")) - } - if err != nil { - return statusFromError(pkt, err) - } - return &sshFxpHandlePacket{ID: pkt.id(), Handle: r.handle} -} -func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { - var err error - r.Method = "List" - r.state.listerAt, err = h.FileList.Filelist(r) - if err != nil { - switch err.(type) { - case syscall.Errno: - err = &os.PathError{Path: r.Filepath, Err: err} - } - return statusFromError(pkt, err) - } - return &sshFxpHandlePacket{ID: pkt.id(), Handle: r.handle} -} - -// wrap FileReader handler -func fileget(h FileReader, r *Request, pkt requestPacket) responsePacket { - //fmt.Println("fileget", r) - r.state.RLock() - reader := r.state.readerAt - r.state.RUnlock() - if reader == nil { - return statusFromError(pkt, errors.New("unexpected read packet")) - } - - _, offset, length := packetData(pkt) - data := make([]byte, clamp(length, maxTxPacket)) - n, err := reader.ReadAt(data, offset) - // only return EOF erro if no data left to read - if err != nil && (err != io.EOF || n == 0) { - return statusFromError(pkt, err) - } - return &sshFxpDataPacket{ - ID: pkt.id(), - Length: uint32(n), - Data: data[:n], - } -} - -// wrap FileWriter handler -func fileput(h FileWriter, r *Request, pkt requestPacket) responsePacket { - //fmt.Println("fileput", r) - r.state.RLock() - writer := 
r.state.writerAt - r.state.RUnlock() - if writer == nil { - return statusFromError(pkt, errors.New("unexpected write packet")) - } - - data, offset, _ := packetData(pkt) - _, err := writer.WriteAt(data, offset) - return statusFromError(pkt, err) -} - -// file data for additional read/write packets -func packetData(p requestPacket) (data []byte, offset int64, length uint32) { - switch p := p.(type) { - case *sshFxpReadPacket: - length = p.Len - offset = int64(p.Offset) - case *sshFxpWritePacket: - data = p.Data - length = p.Length - offset = int64(p.Offset) - } - return -} - -// wrap FileCmder handler -func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { - - switch p := pkt.(type) { - case *sshFxpFsetstatPacket: - r.Flags = p.Flags - r.Attrs = p.Attrs.([]byte) - } - err := h.Filecmd(r) - return statusFromError(pkt, err) -} - -// wrap FileLister handler -func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { - var err error - lister := r.getLister() - if lister == nil { - return statusFromError(pkt, errors.New("unexpected dir packet")) - } - - offset := r.lsNext() - finfo := make([]os.FileInfo, MaxFilelist) - n, err := lister.ListAt(finfo, offset) - r.lsInc(int64(n)) - // ignore EOF as we only return it when there are no results - finfo = finfo[:n] // avoid need for nil tests below - - switch r.Method { - case "List": - if err != nil && err != io.EOF { - return statusFromError(pkt, err) - } - if err == io.EOF && n == 0 { - return statusFromError(pkt, io.EOF) - } - dirname := filepath.ToSlash(path.Base(r.Filepath)) - ret := &sshFxpNamePacket{ID: pkt.id()} - - for _, fi := range finfo { - ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ - Name: fi.Name(), - LongName: runLs(dirname, fi), - Attrs: []interface{}{fi}, - }) - } - return ret - default: - err = errors.Errorf("unexpected method: %s", r.Method) - return statusFromError(pkt, err) - } -} - -func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { - lister, err := h.Filelist(r) - if err != nil { - return statusFromError(pkt, err) - } - finfo := make([]os.FileInfo, 1) - n, err := lister.ListAt(finfo, 0) - finfo = finfo[:n] // avoid need for nil tests below - - switch r.Method { - case "Stat": - if err != nil && err != io.EOF { - return statusFromError(pkt, err) - } - if n == 0 { - err = &os.PathError{Op: "stat", Path: r.Filepath, - Err: syscall.ENOENT} - return statusFromError(pkt, err) - } - return &sshFxpStatResponse{ - ID: pkt.id(), - info: finfo[0], - } - case "Readlink": - if err != nil && err != io.EOF { - return statusFromError(pkt, err) - } - if n == 0 { - err = &os.PathError{Op: "readlink", Path: r.Filepath, - Err: syscall.ENOENT} - return statusFromError(pkt, err) - } - filename := finfo[0].Name() - return &sshFxpNamePacket{ - ID: pkt.id(), - NameAttrs: []sshFxpNameAttr{{ - Name: filename, - LongName: filename, - Attrs: emptyFileStat, - }}, - } - default: - err = errors.Errorf("unexpected method: %s", r.Method) - return statusFromError(pkt, err) - } -} - -// init attributes of request object from packet data -func requestMethod(p requestPacket) (method string) { - switch p.(type) { - case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket: - // set in open() above - case *sshFxpOpendirPacket, *sshFxpReaddirPacket: - // set in opendir() above - case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: - method = "Setstat" - case *sshFxpRenamePacket: - method = "Rename" - case *sshFxpSymlinkPacket: - method = "Symlink" - case *sshFxpRemovePacket: - method = "Remove" - 
case *sshFxpStatPacket, *sshFxpLstatPacket, *sshFxpFstatPacket: - method = "Stat" - case *sshFxpRmdirPacket: - method = "Rmdir" - case *sshFxpReadlinkPacket: - method = "Readlink" - case *sshFxpMkdirPacket: - method = "Mkdir" - } - return method -} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go deleted file mode 100644 index 94d306b6e..000000000 --- a/vendor/github.com/pkg/sftp/request_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package sftp - -import "syscall" - -func fakeFileInfoSys() interface{} { - return syscall.Win32FileAttributeData{} -} - -func testOsSys(sys interface{}) error { - return nil -} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go deleted file mode 100644 index 0fac1b668..000000000 --- a/vendor/github.com/pkg/sftp/server.go +++ /dev/null @@ -1,679 +0,0 @@ -package sftp - -// sftp server counterpart - -import ( - "encoding" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - - "github.com/pkg/errors" -) - -const ( - SftpServerWorkerCount = 8 -) - -// Server is an SSH File Transfer Protocol (sftp) server. -// This is intended to provide the sftp subsystem to an ssh server daemon. -// This implementation currently supports most of sftp server protocol version 3, -// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -type Server struct { - *serverConn - debugStream io.Writer - readOnly bool - pktMgr *packetManager - openFiles map[string]*os.File - openFilesLock sync.RWMutex - handleCount int - maxTxPacket uint32 -} - -func (svr *Server) nextHandle(f *os.File) string { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - svr.handleCount++ - handle := strconv.Itoa(svr.handleCount) - svr.openFiles[handle] = f - return handle -} - -func (svr *Server) closeHandle(handle string) error { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - if f, ok := svr.openFiles[handle]; ok { - delete(svr.openFiles, handle) - return f.Close() - } - - return syscall.EBADF -} - -func (svr *Server) getHandle(handle string) (*os.File, bool) { - svr.openFilesLock.RLock() - defer svr.openFilesLock.RUnlock() - f, ok := svr.openFiles[handle] - return f, ok -} - -type serverRespondablePacket interface { - encoding.BinaryUnmarshaler - id() uint32 - respond(svr *Server) responsePacket -} - -// NewServer creates a new Server instance around the provided streams, serving -// content from the root of the filesystem. Optionally, ServerOption -// functions may be specified to further configure the Server. -// -// A subsequent call to Serve() is required to begin serving files over SFTP. -func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { - svrConn := &serverConn{ - conn: conn{ - Reader: rwc, - WriteCloser: rwc, - }, - } - s := &Server{ - serverConn: svrConn, - debugStream: ioutil.Discard, - pktMgr: newPktMgr(svrConn), - openFiles: make(map[string]*os.File), - maxTxPacket: 1 << 15, - } - - for _, o := range options { - if err := o(s); err != nil { - return nil, err - } - } - - return s, nil -} - -// A ServerOption is a function which applies configuration to a Server. -type ServerOption func(*Server) error - -// WithDebug enables Server debugging output to the supplied io.Writer. -func WithDebug(w io.Writer) ServerOption { - return func(s *Server) error { - s.debugStream = w - return nil - } -} - -// ReadOnly configures a Server to serve files in read-only mode. 
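A sketch of constructing this path-based Server with the two options defined above; `channel` is assumed to be the io.ReadWriteCloser carrying the SFTP byte stream:

package main

import (
	"io"
	"os"

	"github.com/pkg/sftp"
)

// serve runs one read-only SFTP session rooted at the process's filesystem.
func serve(channel io.ReadWriteCloser) error {
	server, err := sftp.NewServer(
		channel,
		sftp.ReadOnly(),           // write requests get SSH_FX_PERMISSION_DENIED
		sftp.WithDebug(os.Stderr), // debug output to stderr
	)
	if err != nil {
		return err
	}
	return server.Serve()
}

func main() {} // wiring of the SSH side is elided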
-func ReadOnly() ServerOption { - return func(s *Server) error { - s.readOnly = true - return nil - } -} - -type rxPacket struct { - pktType fxp - pktBytes []byte -} - -// Up to N parallel servers -func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error { - for pkt := range pktChan { - // readonly checks - readonly := true - switch pkt := pkt.requestPacket.(type) { - case notReadOnly: - readonly = false - case *sshFxpOpenPacket: - readonly = pkt.readonly() - case *sshFxpExtendedPacket: - readonly = pkt.readonly() - } - - // If server is operating read-only and a write operation is requested, - // return permission denied - if !readonly && svr.readOnly { - svr.sendPacket(orderedResponse{ - responsePacket: statusFromError(pkt, syscall.EPERM), - orderid: pkt.orderId()}) - continue - } - - if err := handlePacket(svr, pkt); err != nil { - return err - } - } - return nil -} - -func handlePacket(s *Server, p orderedRequest) error { - var rpkt responsePacket - switch p := p.requestPacket.(type) { - case *sshFxInitPacket: - rpkt = sshFxVersionPacket{Version: sftpProtocolVersion} - case *sshFxpStatPacket: - // stat the requested file - info, err := os.Stat(p.Path) - rpkt = sshFxpStatResponse{ - ID: p.ID, - info: info, - } - if err != nil { - rpkt = statusFromError(p, err) - } - case *sshFxpLstatPacket: - // stat the requested file - info, err := os.Lstat(p.Path) - rpkt = sshFxpStatResponse{ - ID: p.ID, - info: info, - } - if err != nil { - rpkt = statusFromError(p, err) - } - case *sshFxpFstatPacket: - f, ok := s.getHandle(p.Handle) - var err error = syscall.EBADF - var info os.FileInfo - if ok { - info, err = f.Stat() - rpkt = sshFxpStatResponse{ - ID: p.ID, - info: info, - } - } - if err != nil { - rpkt = statusFromError(p, err) - } - case *sshFxpMkdirPacket: - // TODO FIXME: ignore flags field - err := os.Mkdir(p.Path, 0755) - rpkt = statusFromError(p, err) - case *sshFxpRmdirPacket: - err := os.Remove(p.Path) - rpkt = statusFromError(p, err) - case *sshFxpRemovePacket: - err := os.Remove(p.Filename) - rpkt = statusFromError(p, err) - case *sshFxpRenamePacket: - err := os.Rename(p.Oldpath, p.Newpath) - rpkt = statusFromError(p, err) - case *sshFxpSymlinkPacket: - err := os.Symlink(p.Targetpath, p.Linkpath) - rpkt = statusFromError(p, err) - case *sshFxpClosePacket: - rpkt = statusFromError(p, s.closeHandle(p.Handle)) - case *sshFxpReadlinkPacket: - f, err := os.Readlink(p.Path) - rpkt = sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []sshFxpNameAttr{{ - Name: f, - LongName: f, - Attrs: emptyFileStat, - }}, - } - if err != nil { - rpkt = statusFromError(p, err) - } - case *sshFxpRealpathPacket: - f, err := filepath.Abs(p.Path) - f = cleanPath(f) - rpkt = sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []sshFxpNameAttr{{ - Name: f, - LongName: f, - Attrs: emptyFileStat, - }}, - } - if err != nil { - rpkt = statusFromError(p, err) - } - case *sshFxpOpendirPacket: - if stat, err := os.Stat(p.Path); err != nil { - rpkt = statusFromError(p, err) - } else if !stat.IsDir() { - rpkt = statusFromError(p, &os.PathError{ - Path: p.Path, Err: syscall.ENOTDIR}) - } else { - rpkt = sshFxpOpenPacket{ - ID: p.ID, - Path: p.Path, - Pflags: ssh_FXF_READ, - }.respond(s) - } - case *sshFxpReadPacket: - var err error = syscall.EBADF - f, ok := s.getHandle(p.Handle) - if ok { - err = nil - data := make([]byte, clamp(p.Len, s.maxTxPacket)) - n, _err := f.ReadAt(data, int64(p.Offset)) - if _err != nil && (_err != io.EOF || n == 0) { - err = _err - } - rpkt = sshFxpDataPacket{ - ID: p.ID, - Length: uint32(n), - 
Data: data[:n], - } - } - if err != nil { - rpkt = statusFromError(p, err) - } - - case *sshFxpWritePacket: - f, ok := s.getHandle(p.Handle) - var err error = syscall.EBADF - if ok { - _, err = f.WriteAt(p.Data, int64(p.Offset)) - } - rpkt = statusFromError(p, err) - case serverRespondablePacket: - rpkt = p.respond(s) - default: - return errors.Errorf("unexpected packet type %T", p) - } - - s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, p.orderId())) - return nil -} - -// Serve serves SFTP connections until the streams stop or the SFTP subsystem -// is stopped. -func (svr *Server) Serve() error { - var wg sync.WaitGroup - runWorker := func(ch chan orderedRequest) { - wg.Add(1) - go func() { - defer wg.Done() - if err := svr.sftpServerWorker(ch); err != nil { - svr.conn.Close() // shuts down recvPacket - } - }() - } - pktChan := svr.pktMgr.workerChan(runWorker) - - var err error - var pkt requestPacket - var pktType uint8 - var pktBytes []byte - for { - pktType, pktBytes, err = svr.recvPacket() - if err != nil { - break - } - - pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) - if err != nil { - switch errors.Cause(err) { - case errUnknownExtendedPacket: - if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { - debug("failed to send err packet: %v", err) - svr.conn.Close() // shuts down recvPacket - break - } - default: - debug("makePacket err: %v", err) - svr.conn.Close() // shuts down recvPacket - break - } - } - - pktChan <- svr.pktMgr.newOrderedRequest(pkt) - } - - close(pktChan) // shuts down sftpServerWorkers - wg.Wait() // wait for all workers to exit - - // close any still-open files - for handle, file := range svr.openFiles { - fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) - file.Close() - } - return err // error from recvPacket -} - -type ider interface { - id() uint32 -} - -// The init packet has no ID, so we just return a zero-value ID -func (p sshFxInitPacket) id() uint32 { return 0 } - -type sshFxpStatResponse struct { - ID uint32 - info os.FileInfo -} - -func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_ATTRS} - b = marshalUint32(b, p.ID) - b = marshalFileInfo(b, p.info) - return b, nil -} - -var emptyFileStat = []interface{}{uint32(0)} - -func (p sshFxpOpenPacket) readonly() bool { - return !p.hasPflags(ssh_FXF_WRITE) -} - -func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { - for _, f := range flags { - if p.Pflags&f == 0 { - return false - } - } - return true -} - -func (p sshFxpOpenPacket) respond(svr *Server) responsePacket { - var osFlags int - if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { - osFlags |= os.O_RDWR - } else if p.hasPflags(ssh_FXF_WRITE) { - osFlags |= os.O_WRONLY - } else if p.hasPflags(ssh_FXF_READ) { - osFlags |= os.O_RDONLY - } else { - // how are they opening? 
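The branch below maps SFTP pflags onto os.OpenFile flags: exactly one access mode, then the modifier bits. A standalone sketch of the same mapping (constant names are illustrative):

package main

import (
	"fmt"
	"os"
)

const (
	fxfRead   = 0x01
	fxfWrite  = 0x02
	fxfAppend = 0x04
	fxfCreat  = 0x08
	fxfTrunc  = 0x10
	fxfExcl   = 0x20
)

// pflagsToOsFlags mirrors what sshFxpOpenPacket.respond does before
// calling os.OpenFile: pick an access mode, then OR in modifiers.
func pflagsToOsFlags(pflags uint32) (int, error) {
	var osFlags int
	switch {
	case pflags&fxfRead != 0 && pflags&fxfWrite != 0:
		osFlags |= os.O_RDWR
	case pflags&fxfWrite != 0:
		osFlags |= os.O_WRONLY
	case pflags&fxfRead != 0:
		osFlags |= os.O_RDONLY
	default:
		return 0, fmt.Errorf("no access mode requested") // the server answers EINVAL
	}
	if pflags&fxfAppend != 0 {
		osFlags |= os.O_APPEND
	}
	if pflags&fxfCreat != 0 {
		osFlags |= os.O_CREATE
	}
	if pflags&fxfTrunc != 0 {
		osFlags |= os.O_TRUNC
	}
	if pflags&fxfExcl != 0 {
		osFlags |= os.O_EXCL
	}
	return osFlags, nil
}

func main() {
	f, _ := pflagsToOsFlags(fxfWrite | fxfCreat | fxfTrunc)
	fmt.Println(f == os.O_WRONLY|os.O_CREATE|os.O_TRUNC) // true
}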
- return statusFromError(p, syscall.EINVAL) - } - - if p.hasPflags(ssh_FXF_APPEND) { - osFlags |= os.O_APPEND - } - if p.hasPflags(ssh_FXF_CREAT) { - osFlags |= os.O_CREATE - } - if p.hasPflags(ssh_FXF_TRUNC) { - osFlags |= os.O_TRUNC - } - if p.hasPflags(ssh_FXF_EXCL) { - osFlags |= os.O_EXCL - } - - f, err := os.OpenFile(p.Path, osFlags, 0644) - if err != nil { - return statusFromError(p, err) - } - - handle := svr.nextHandle(f) - return sshFxpHandlePacket{ID: p.id(), Handle: handle} -} - -func (p sshFxpReaddirPacket) respond(svr *Server) responsePacket { - f, ok := svr.getHandle(p.Handle) - if !ok { - return statusFromError(p, syscall.EBADF) - } - - dirname := f.Name() - dirents, err := f.Readdir(128) - if err != nil { - return statusFromError(p, err) - } - - ret := sshFxpNamePacket{ID: p.ID} - for _, dirent := range dirents { - ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ - Name: dirent.Name(), - LongName: runLs(dirname, dirent), - Attrs: []interface{}{dirent}, - }) - } - return ret -} - -func (p sshFxpSetstatPacket) respond(svr *Server) responsePacket { - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - debug("setstat name \"%s\"", p.Path) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = os.Truncate(p.Path, int64(size)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = os.Chmod(p.Path, os.FileMode(mode)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(p.Path, atimeT, mtimeT) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = os.Chown(p.Path, int(uid), int(gid)) - } - } - - return statusFromError(p, err) -} - -func (p sshFxpFsetstatPacket) respond(svr *Server) responsePacket { - f, ok := svr.getHandle(p.Handle) - if !ok { - return statusFromError(p, syscall.EBADF) - } - - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - debug("fsetstat name \"%s\"", f.Name()) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = f.Truncate(int64(size)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = f.Chmod(os.FileMode(mode)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(f.Name(), atimeT, mtimeT) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = f.Chown(int(uid), int(gid)) - } - } - - return statusFromError(p, err) -} - -// 
translateErrno translates a syscall error number to a SFTP error code. -func translateErrno(errno syscall.Errno) uint32 { - switch errno { - case 0: - return ssh_FX_OK - case syscall.ENOENT: - return ssh_FX_NO_SUCH_FILE - case syscall.EPERM: - return ssh_FX_PERMISSION_DENIED - } - - return ssh_FX_FAILURE -} - -func statusFromError(p ider, err error) sshFxpStatusPacket { - ret := sshFxpStatusPacket{ - ID: p.id(), - StatusError: StatusError{ - // ssh_FX_OK = 0 - // ssh_FX_EOF = 1 - // ssh_FX_NO_SUCH_FILE = 2 ENOENT - // ssh_FX_PERMISSION_DENIED = 3 - // ssh_FX_FAILURE = 4 - // ssh_FX_BAD_MESSAGE = 5 - // ssh_FX_NO_CONNECTION = 6 - // ssh_FX_CONNECTION_LOST = 7 - // ssh_FX_OP_UNSUPPORTED = 8 - Code: ssh_FX_OK, - }, - } - if err == nil { - return ret - } - - debug("statusFromError: error is %T %#v", err, err) - ret.StatusError.Code = ssh_FX_FAILURE - ret.StatusError.msg = err.Error() - - switch e := err.(type) { - case syscall.Errno: - ret.StatusError.Code = translateErrno(e) - case *os.PathError: - debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) - if errno, ok := e.Err.(syscall.Errno); ok { - ret.StatusError.Code = translateErrno(errno) - } - case fxerr: - ret.StatusError.Code = uint32(e) - default: - switch e { - case io.EOF: - ret.StatusError.Code = ssh_FX_EOF - case os.ErrNotExist: - ret.StatusError.Code = ssh_FX_NO_SUCH_FILE - } - } - - return ret -} - -func clamp(v, max uint32) uint32 { - if v > max { - return max - } - return v -} - -func runLsTypeWord(dirent os.FileInfo) string { - // find first character, the type char - // b Block special file. - // c Character special file. - // d Directory. - // l Symbolic link. - // s Socket link. - // p FIFO. - // - Regular file. - tc := '-' - mode := dirent.Mode() - if (mode & os.ModeDir) != 0 { - tc = 'd' - } else if (mode & os.ModeDevice) != 0 { - tc = 'b' - if (mode & os.ModeCharDevice) != 0 { - tc = 'c' - } - } else if (mode & os.ModeSymlink) != 0 { - tc = 'l' - } else if (mode & os.ModeSocket) != 0 { - tc = 's' - } else if (mode & os.ModeNamedPipe) != 0 { - tc = 'p' - } - - // owner - orc := '-' - if (mode & 0400) != 0 { - orc = 'r' - } - owc := '-' - if (mode & 0200) != 0 { - owc = 'w' - } - oxc := '-' - ox := (mode & 0100) != 0 - setuid := (mode & os.ModeSetuid) != 0 - if ox && setuid { - oxc = 's' - } else if setuid { - oxc = 'S' - } else if ox { - oxc = 'x' - } - - // group - grc := '-' - if (mode & 040) != 0 { - grc = 'r' - } - gwc := '-' - if (mode & 020) != 0 { - gwc = 'w' - } - gxc := '-' - gx := (mode & 010) != 0 - setgid := (mode & os.ModeSetgid) != 0 - if gx && setgid { - gxc = 's' - } else if setgid { - gxc = 'S' - } else if gx { - gxc = 'x' - } - - // all / others - arc := '-' - if (mode & 04) != 0 { - arc = 'r' - } - awc := '-' - if (mode & 02) != 0 { - awc = 'w' - } - axc := '-' - ax := (mode & 01) != 0 - sticky := (mode & os.ModeSticky) != 0 - if ax && sticky { - axc = 't' - } else if sticky { - axc = 'T' - } else if ax { - axc = 'x' - } - - return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go deleted file mode 100644 index 8c01dac52..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go +++ /dev/null @@ -1,21 +0,0 @@ -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Bsize), // fragment size is a linux 
thing; use block size here - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? - Flag: uint64(stat.Flags), // assuming POSIX? - Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go deleted file mode 100644 index 4cf91dc83..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build darwin linux - -// fill in statvfs structure with OS specific values -// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) - -package sftp - -import ( - "syscall" -) - -func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { - stat := &syscall.Statfs_t{} - if err := syscall.Statfs(p.Path, stat); err != nil { - return statusFromError(p, err) - } - - retPkt, err := statvfsFromStatfst(stat) - if err != nil { - return statusFromError(p, err) - } - retPkt.ID = p.ID - - return retPkt -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go deleted file mode 100644 index 1d180d47c..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux - -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Frsize), - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Flag: uint64(stat.Flags), // assuming POSIX? 
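- // Namelen from statfs(2) is the maximum filename length on this filesystem.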
- Namemax: uint64(stat.Namelen), - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go deleted file mode 100644 index c6f61643c..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !darwin,!linux - -package sftp - -import ( - "syscall" -) - -func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { - return statusFromError(p, syscall.ENOTSUP) -} diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go deleted file mode 100644 index a14c73482..000000000 --- a/vendor/github.com/pkg/sftp/server_stubs.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !cgo,!plan9 windows android - -package sftp - -import ( - "os" - "time" - "fmt" -) - -func runLs(dirname string, dirent os.FileInfo) string { - typeword := runLsTypeWord(dirent) - numLinks := 1 - if dirent.IsDir() { - numLinks = 0 - } - username := "root" - groupname := "root" - mtime := dirent.ModTime() - monthStr := mtime.Month().String()[0:3] - day := mtime.Day() - year := mtime.Year() - now := time.Now() - isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) - - yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) - if isOld { - yearOrTime = fmt.Sprintf("%d", year) - } - - return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) -} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go deleted file mode 100644 index abceca498..000000000 --- a/vendor/github.com/pkg/sftp/server_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix -// +build cgo - -package sftp - -import ( - "fmt" - "os" - "path" - "syscall" - "time" -) - -func runLsStatt(dirent os.FileInfo, statt *syscall.Stat_t) string { - // example from openssh sftp server: - // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd - // format: - // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name - - typeword := runLsTypeWord(dirent) - numLinks := statt.Nlink - uid := statt.Uid - gid := statt.Gid - username := fmt.Sprintf("%d", uid) - groupname := fmt.Sprintf("%d", gid) - // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output - - mtime := dirent.ModTime() - monthStr := mtime.Month().String()[0:3] - day := mtime.Day() - year := mtime.Year() - now := time.Now() - isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) - - yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) - if isOld { - yearOrTime = fmt.Sprintf("%d", year) - } - - return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) -} - -// ls -l style output for a file, which is in the 'long output' section of a readdir response packet -// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases -func runLs(dirname string, dirent os.FileInfo) string { - dsys := dirent.Sys() - if dsys == nil { - } else if statt, ok := dsys.(*syscall.Stat_t); !ok { - } else { - return runLsStatt(dirent, statt) - } - - return path.Join(dirname, dirent.Name()) -} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go deleted file mode 100644 index 
3cdb14df8..000000000 --- a/vendor/github.com/pkg/sftp/sftp.go +++ /dev/null @@ -1,217 +0,0 @@ -// Package sftp implements the SSH File Transfer Protocol as described in -// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -package sftp - -import ( - "fmt" - - "github.com/pkg/errors" -) - -const ( - ssh_FXP_INIT = 1 - ssh_FXP_VERSION = 2 - ssh_FXP_OPEN = 3 - ssh_FXP_CLOSE = 4 - ssh_FXP_READ = 5 - ssh_FXP_WRITE = 6 - ssh_FXP_LSTAT = 7 - ssh_FXP_FSTAT = 8 - ssh_FXP_SETSTAT = 9 - ssh_FXP_FSETSTAT = 10 - ssh_FXP_OPENDIR = 11 - ssh_FXP_READDIR = 12 - ssh_FXP_REMOVE = 13 - ssh_FXP_MKDIR = 14 - ssh_FXP_RMDIR = 15 - ssh_FXP_REALPATH = 16 - ssh_FXP_STAT = 17 - ssh_FXP_RENAME = 18 - ssh_FXP_READLINK = 19 - ssh_FXP_SYMLINK = 20 - ssh_FXP_STATUS = 101 - ssh_FXP_HANDLE = 102 - ssh_FXP_DATA = 103 - ssh_FXP_NAME = 104 - ssh_FXP_ATTRS = 105 - ssh_FXP_EXTENDED = 200 - ssh_FXP_EXTENDED_REPLY = 201 -) - -const ( - ssh_FX_OK = 0 - ssh_FX_EOF = 1 - ssh_FX_NO_SUCH_FILE = 2 - ssh_FX_PERMISSION_DENIED = 3 - ssh_FX_FAILURE = 4 - ssh_FX_BAD_MESSAGE = 5 - ssh_FX_NO_CONNECTION = 6 - ssh_FX_CONNECTION_LOST = 7 - ssh_FX_OP_UNSUPPORTED = 8 - - // see draft-ietf-secsh-filexfer-13 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 - ssh_FX_INVALID_HANDLE = 9 - ssh_FX_NO_SUCH_PATH = 10 - ssh_FX_FILE_ALREADY_EXISTS = 11 - ssh_FX_WRITE_PROTECT = 12 - ssh_FX_NO_MEDIA = 13 - ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 - ssh_FX_QUOTA_EXCEEDED = 15 - ssh_FX_UNKNOWN_PRINCIPAL = 16 - ssh_FX_LOCK_CONFLICT = 17 - ssh_FX_DIR_NOT_EMPTY = 18 - ssh_FX_NOT_A_DIRECTORY = 19 - ssh_FX_INVALID_FILENAME = 20 - ssh_FX_LINK_LOOP = 21 - ssh_FX_CANNOT_DELETE = 22 - ssh_FX_INVALID_PARAMETER = 23 - ssh_FX_FILE_IS_A_DIRECTORY = 24 - ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 - ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 - ssh_FX_DELETE_PENDING = 27 - ssh_FX_FILE_CORRUPT = 28 - ssh_FX_OWNER_INVALID = 29 - ssh_FX_GROUP_INVALID = 30 - ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 -) - -const ( - ssh_FXF_READ = 0x00000001 - ssh_FXF_WRITE = 0x00000002 - ssh_FXF_APPEND = 0x00000004 - ssh_FXF_CREAT = 0x00000008 - ssh_FXF_TRUNC = 0x00000010 - ssh_FXF_EXCL = 0x00000020 -) - -type fxp uint8 - -func (f fxp) String() string { - switch f { - case ssh_FXP_INIT: - return "SSH_FXP_INIT" - case ssh_FXP_VERSION: - return "SSH_FXP_VERSION" - case ssh_FXP_OPEN: - return "SSH_FXP_OPEN" - case ssh_FXP_CLOSE: - return "SSH_FXP_CLOSE" - case ssh_FXP_READ: - return "SSH_FXP_READ" - case ssh_FXP_WRITE: - return "SSH_FXP_WRITE" - case ssh_FXP_LSTAT: - return "SSH_FXP_LSTAT" - case ssh_FXP_FSTAT: - return "SSH_FXP_FSTAT" - case ssh_FXP_SETSTAT: - return "SSH_FXP_SETSTAT" - case ssh_FXP_FSETSTAT: - return "SSH_FXP_FSETSTAT" - case ssh_FXP_OPENDIR: - return "SSH_FXP_OPENDIR" - case ssh_FXP_READDIR: - return "SSH_FXP_READDIR" - case ssh_FXP_REMOVE: - return "SSH_FXP_REMOVE" - case ssh_FXP_MKDIR: - return "SSH_FXP_MKDIR" - case ssh_FXP_RMDIR: - return "SSH_FXP_RMDIR" - case ssh_FXP_REALPATH: - return "SSH_FXP_REALPATH" - case ssh_FXP_STAT: - return "SSH_FXP_STAT" - case ssh_FXP_RENAME: - return "SSH_FXP_RENAME" - case ssh_FXP_READLINK: - return "SSH_FXP_READLINK" - case ssh_FXP_SYMLINK: - return "SSH_FXP_SYMLINK" - case ssh_FXP_STATUS: - return "SSH_FXP_STATUS" - case ssh_FXP_HANDLE: - return "SSH_FXP_HANDLE" - case ssh_FXP_DATA: - return "SSH_FXP_DATA" - case ssh_FXP_NAME: - return "SSH_FXP_NAME" - case ssh_FXP_ATTRS: - return "SSH_FXP_ATTRS" - case ssh_FXP_EXTENDED: - return "SSH_FXP_EXTENDED" - case ssh_FXP_EXTENDED_REPLY: - return "SSH_FXP_EXTENDED_REPLY" - default: - return 
"unknown" - } -} - -type fx uint8 - -func (f fx) String() string { - switch f { - case ssh_FX_OK: - return "SSH_FX_OK" - case ssh_FX_EOF: - return "SSH_FX_EOF" - case ssh_FX_NO_SUCH_FILE: - return "SSH_FX_NO_SUCH_FILE" - case ssh_FX_PERMISSION_DENIED: - return "SSH_FX_PERMISSION_DENIED" - case ssh_FX_FAILURE: - return "SSH_FX_FAILURE" - case ssh_FX_BAD_MESSAGE: - return "SSH_FX_BAD_MESSAGE" - case ssh_FX_NO_CONNECTION: - return "SSH_FX_NO_CONNECTION" - case ssh_FX_CONNECTION_LOST: - return "SSH_FX_CONNECTION_LOST" - case ssh_FX_OP_UNSUPPORTED: - return "SSH_FX_OP_UNSUPPORTED" - default: - return "unknown" - } -} - -type unexpectedPacketErr struct { - want, got uint8 -} - -func (u *unexpectedPacketErr) Error() string { - return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) -} - -func unimplementedPacketErr(u uint8) error { - return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) -} - -type unexpectedIDErr struct{ want, got uint32 } - -func (u *unexpectedIDErr) Error() string { - return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) -} - -func unimplementedSeekWhence(whence int) error { - return errors.Errorf("sftp: unimplemented seek whence %v", whence) -} - -func unexpectedCount(want, got uint32) error { - return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) -} - -type unexpectedVersionErr struct{ want, got uint32 } - -func (u *unexpectedVersionErr) Error() string { - return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) -} - -// A StatusError is returned when an SFTP operation fails, and provides -// additional information about the failure. -type StatusError struct { - Code uint32 - msg, lang string -} - -func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore deleted file mode 100644 index 57b96b8c9..000000000 --- a/vendor/github.com/pkg/xattr/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -# Dependencies -go.sum - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/pkg/xattr/.travis.sh b/vendor/github.com/pkg/xattr/.travis.sh deleted file mode 100644 index 4241b6155..000000000 --- a/vendor/github.com/pkg/xattr/.travis.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -e - -echo "Building for Linux..." -GOOS=linux go build - -echo "Building for Darwin..." -GOOS=darwin go build - -echo "Building for FreeBSD..." -GOOS=freebsd go build - -echo "Building for Windows...(dummy)" -GOOS=windows go build - -echo "Running tests..." -go vet -go test -v -race -coverprofile=coverage.txt -covermode=atomic diff --git a/vendor/github.com/pkg/xattr/.travis.yml b/vendor/github.com/pkg/xattr/.travis.yml deleted file mode 100644 index 0223e2106..000000000 --- a/vendor/github.com/pkg/xattr/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -language: go -sudo: false - -go: - - "1.11.x" - -os: - - linux - - osx - - windows - -before_install: - - go version - - export GO111MODULE=on - - go get golang.org/x/tools/cmd/goimports - -install: - - go build - -script: - - ./.travis.sh - # goimports on windows gives false positives - - if [[ "${TRAVIS_OS_NAME}" != "windows" ]]; then diff <(goimports -d .) 
<(printf ""); fi - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE deleted file mode 100644 index 99d2e9dc8..000000000 --- a/vendor/github.com/pkg/xattr/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012 Dave Cheney. All rights reserved. -Copyright (c) 2014 Kuba Podgórski. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md deleted file mode 100644 index 21b4db91f..000000000 --- a/vendor/github.com/pkg/xattr/README.md +++ /dev/null @@ -1,46 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) -[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) -[![Build Status](https://travis-ci.org/pkg/xattr.svg?branch=master)](https://travis-ci.org/pkg/xattr) -[![Version](https://badge.fury.io/gh/pkg%2Fxattr.svg)](https://github.com/pkg/xattr/releases) -[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) - -xattr -===== -Extended attribute support for Go (linux + darwin + freebsd + netbsd). - -"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) - -`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. - -The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they -do not reference a symlink that appears at the end of a path. See -[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. - -### Example -```go - const path = "/tmp/myfile" - const prefix = "user." 
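- // On Linux, unprivileged processes can normally only set attributes in the "user." namespace.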
- - if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { - log.Fatal(err) - } - - var list []string - if list, err = xattr.List(path); err != nil { - log.Fatal(err) - } - - var data []byte - if data, err = xattr.Get(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - if err = xattr.Remove(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - // One can also specify the flags parameter to be passed to the OS. - if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { - log.Fatal(err) - } -``` diff --git a/vendor/github.com/pkg/xattr/go.mod b/vendor/github.com/pkg/xattr/go.mod deleted file mode 100644 index cf556ffa1..000000000 --- a/vendor/github.com/pkg/xattr/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pkg/xattr - -require golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51 diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go deleted file mode 100644 index b3e2176d2..000000000 --- a/vendor/github.com/pkg/xattr/xattr.go +++ /dev/null @@ -1,234 +0,0 @@ -/* -Package xattr provides support for extended attributes on linux, darwin and freebsd. -Extended attributes are name:value pairs associated permanently with files and directories, -similar to the environment strings associated with a process. -An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. -More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . - -All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. The "L" -variant will not follow a symlink at the end of the path, and "F" variant accepts -a file descriptor instead of a path. - -Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are -symlinks: -Get will follow "symlink1" and "symlink2" and operate on the target of -"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". -*/ -package xattr - -import ( - "os" - "syscall" -) - -// Error records an error and the operation, file path and attribute that caused it. -type Error struct { - Op string - Path string - Name string - Err error -} - -func (e *Error) Error() string { - return e.Op + " " + e.Path + " " + e.Name + ": " + e.Err.Error() -} - -// Get retrieves extended attribute data associated with path. It will follow -// all symlinks along the path. -func Get(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return getxattr(path, name, data) - }) -} - -// LGet is like Get but does not follow a symlink at the end of the path. -func LGet(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return lgetxattr(path, name, data) - }) -} - -// FGet is like Get but accepts a os.File instead of a file path. -func FGet(f *os.File, name string) ([]byte, error) { - return get(f.Name(), name, func(name string, data []byte) (int, error) { - return fgetxattr(f, name, data) - }) -} - -type getxattrFunc func(name string, data []byte) (int, error) - -// get contains the buffer allocation logic used by both Get and LGet. -func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { - const ( - // Start with a 1 KB buffer for the xattr value - initialBufSize = 1024 - - // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's - // much smaller at 64 KB. 
Unless the kernel is evil or buggy, we should never - // hit the limit. - maxBufSize = 64 * 1024 * 1024 - - // Function name as reported in error messages - myname = "xattr.get" - ) - - size := initialBufSize - for { - data := make([]byte, size) - read, err := getxattrFunc(name, data) - - // If the buffer was too small to fit the value, Linux and MacOS react - // differently: - // Linux: returns an ERANGE error and "-1" bytes. - // MacOS: truncates the value and returns "size" bytes. If the value - // happens to be exactly as big as the buffer, we cannot know if it was - // truncated, and we retry with a bigger buffer. Contrary to documentation, - // MacOS never seems to return ERANGE! - // To keep the code simple, we always check both conditions, and sometimes - // double the buffer size without it being strictly necessary. - if err == syscall.ERANGE || read == size { - // The buffer was too small. Try again. - size <<= 1 - if size >= maxBufSize { - return nil, &Error{myname, path, name, syscall.EOVERFLOW} - } - continue - } - if err != nil { - return nil, &Error{myname, path, name, err} - } - return data[:read], nil - } -} - -// Set associates name and data together as an attribute of path. -func Set(path, name string, data []byte) error { - if err := setxattr(path, name, data, 0); err != nil { - return &Error{"xattr.Set", path, name, err} - } - return nil -} - -// LSet is like Set but does not follow a symlink at -// the end of the path. -func LSet(path, name string, data []byte) error { - if err := lsetxattr(path, name, data, 0); err != nil { - return &Error{"xattr.LSet", path, name, err} - } - return nil -} - -// FSet is like Set but accepts an os.File instead of a file path. -func FSet(f *os.File, name string, data []byte) error { - if err := fsetxattr(f, name, data, 0); err != nil { - return &Error{"xattr.FSet", f.Name(), name, err} - } - return nil -} - -// SetWithFlags associates name and data together as an attribute of path. -// Forwards the flags parameter to the syscall layer. -func SetWithFlags(path, name string, data []byte, flags int) error { - if err := setxattr(path, name, data, flags); err != nil { - return &Error{"xattr.SetWithFlags", path, name, err} - } - return nil -} - -// LSetWithFlags is like SetWithFlags but does not follow a symlink at -// the end of the path. -func LSetWithFlags(path, name string, data []byte, flags int) error { - if err := lsetxattr(path, name, data, flags); err != nil { - return &Error{"xattr.LSetWithFlags", path, name, err} - } - return nil -} - -// FSetWithFlags is like SetWithFlags but accepts an os.File instead of a file path. -func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { - if err := fsetxattr(f, name, data, flags); err != nil { - return &Error{"xattr.FSetWithFlags", f.Name(), name, err} - } - return nil -} - -// Remove removes the attribute associated with the given path. -func Remove(path, name string) error { - if err := removexattr(path, name); err != nil { - return &Error{"xattr.Remove", path, name, err} - } - return nil -} - -// LRemove is like Remove but does not follow a symlink at the end of the -// path. -func LRemove(path, name string) error { - if err := lremovexattr(path, name); err != nil { - return &Error{"xattr.LRemove", path, name, err} - } - return nil -} - -// FRemove is like Remove but accepts an os.File instead of a file path.
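The retry logic described in the comments above is easy to get wrong. Here is a minimal, self-contained sketch of the same grow-and-retry pattern; the `readAttr` helper and the fake `fetch` callback are invented for illustration and are not part of the package, and the errno values assume a Unix-style platform.

```go
package main

import (
	"fmt"
	"syscall"
)

// readAttr grows the buffer until the value fits, mirroring the loop in
// xattr.get: retry on ERANGE (Linux) or on a completely filled buffer
// (macOS, where the value may have been silently truncated).
func readAttr(fetch func(data []byte) (int, error)) ([]byte, error) {
	size := 1024 // initial guess, as in xattr.get
	for {
		data := make([]byte, size)
		read, err := fetch(data)
		if err == syscall.ERANGE || read == size {
			size <<= 1
			if size >= 64*1024*1024 { // give up at the 64 MB macOS maximum
				return nil, syscall.EOVERFLOW
			}
			continue
		}
		if err != nil {
			return nil, err
		}
		return data[:read], nil
	}
}

func main() {
	value := make([]byte, 5000) // pretend attribute value larger than 1 KB
	fetch := func(data []byte) (int, error) {
		if len(data) < len(value) {
			return -1, syscall.ERANGE // Linux-style "buffer too small"
		}
		return copy(data, value), nil
	}
	data, err := readAttr(fetch)
	fmt.Println(len(data), err) // 5000 <nil>
}
```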
-func FRemove(f *os.File, name string) error { - if err := fremovexattr(f, name); err != nil { - return &Error{"xattr.FRemove", f.Name(), name, err} - } - return nil -} - -// List retrieves a list of names of extended attributes associated -// with the given path in the file system. -func List(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return listxattr(path, data) - }) -} - -// LList is like List but does not follow a symlink at the end of the -// path. -func LList(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return llistxattr(path, data) - }) -} - -// FList is like List but accepts a os.File instead of a file path. -func FList(f *os.File) ([]string, error) { - return list(f.Name(), func(data []byte) (int, error) { - return flistxattr(f, data) - }) -} - -type listxattrFunc func(data []byte) (int, error) - -// list contains the buffer allocation logic used by both List and LList. -func list(path string, listxattrFunc listxattrFunc) ([]string, error) { - myname := "xattr.list" - // find size. - size, err := listxattrFunc(nil) - if err != nil { - return nil, &Error{myname, path, "", err} - } - if size > 0 { - // `size + 1` because of ERANGE error when reading - // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). - buf := make([]byte, size+1) - // Read into buffer of that size. - read, err := listxattrFunc(buf) - if err != nil { - return nil, &Error{myname, path, "", err} - } - return stringsFromByteSlice(buf[:read]), nil - } - return []string{}, nil -} - -// bytePtrFromSlice returns a pointer to array of bytes and a size. -func bytePtrFromSlice(data []byte) (ptr *byte, size int) { - size = len(data) - if size > 0 { - ptr = &data[0] - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go deleted file mode 100644 index 09db849e4..000000000 --- a/vendor/github.com/pkg/xattr/xattr_bsd.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build freebsd netbsd - -package xattr - -import ( - "os" - "syscall" - "unsafe" -) - -const ( - EXTATTR_NAMESPACE_USER = 1 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -// sysGet is called by getxattr and lgetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. 
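The `list` function above uses the classic two-call pattern for variable-size syscall results: query the required size with a nil buffer, then read into a buffer of that size (plus one spare byte for the SMB1 case). A hedged, self-contained sketch; the `fakeListxattr` helper is invented for illustration and is not part of the package.

```go
package main

import "fmt"

// fakeListxattr stands in for listxattr/llistxattr: with a nil buffer it
// only reports the required size, otherwise it copies the name blob.
func fakeListxattr(blob []byte) func(data []byte) (int, error) {
	return func(data []byte) (int, error) {
		if data == nil {
			return len(blob), nil
		}
		return copy(data, blob), nil
	}
}

func main() {
	listFn := fakeListxattr([]byte("user.a\x00user.b\x00"))
	size, _ := listFn(nil)      // first call: size only
	buf := make([]byte, size+1) // second call: read, with one spare byte
	read, _ := listFn(buf)
	fmt.Printf("%q\n", buf[:read]) // "user.a\x00user.b\x00"
}
```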
-func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_get_file( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - - ssize_t extattr_get_link( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -func setxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -// sysSet is called by setxattr and lsetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysSet(syscallNum uintptr, path string, name string, data []byte) error { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_set_file( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - - ssize_t extattr_set_link( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return err - } - if int(r0) != nbytes { - return syscall.E2BIG - } - return nil -} - -func removexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) -} - -func lremovexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -// sysRemove is called by removexattr and lremovexattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysRemove(syscallNum uintptr, path string, name string) error { - /* - int extattr_delete_file( - const char *path, - int attrnamespace, - const char *attrname - ); - - int extattr_delete_link( - const char *path, - int attrnamespace, - const char *attrname - ); - */ - _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - ) - if err != syscall.Errno(0) { - return err - } - return nil -} - -func listxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// sysList is called by listxattr and llistxattr with the appropriate syscall - // number.
This works because syscalls have the same signature and return -// values. -func sysList(syscallNum uintptr, path string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_list_file( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - - ssize_t extattr_list_link( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On FreeBSD, each entry consists of a single byte containing the length -// of the attribute name, followed by the attribute name. -// The name is _not_ terminated by NULL. -func stringsFromByteSlice(buf []byte) (result []string) { - index := 0 - for index < len(buf) { - next := index + 1 + int(buf[index]) - result = append(result, string(buf[index+1:next])) - index = next - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go deleted file mode 100644 index 8bb8cd980..000000000 --- a/vendor/github.com/pkg/xattr/xattr_darwin.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build darwin - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html -const ( - XATTR_NOFOLLOW = 0x0001 - XATTR_CREATE = 0x0002 - XATTR_REPLACE = 0x0004 - XATTR_NOSECURITY = 0x0008 - XATTR_NODEFAULT = 0x0010 - XATTR_SHOWCOMPRESSION = 0x0020 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return unix.Getxattr(path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return unix.Lgetxattr(path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -func setxattr(path string, name string, data []byte, flags int) error { - return unix.Setxattr(path, name, data, flags) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return unix.Lsetxattr(path, name, data, flags) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -func removexattr(path string, name string) error { - return unix.Removexattr(path, name) -} - -func lremovexattr(path string, name string) error { - return unix.Lremovexattr(path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -func listxattr(path string, data []byte) (int, error) { - return unix.Listxattr(path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return unix.Llistxattr(path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. 
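The two platform families encode the attribute-name list differently, as the stringsFromByteSlice comments describe: FreeBSD/NetBSD prefix each name with a length byte, while Darwin/Linux terminate each name with a NUL byte. A small sketch decoding hypothetical example buffers in both formats:

```go
package main

import "fmt"

func main() {
	// FreeBSD/NetBSD wire format: {len}{name}{len}{name}...
	bsd := []byte{6, 'u', 's', 'e', 'r', '.', 'a', 6, 'u', 's', 'e', 'r', '.', 'b'}
	var names []string
	for i := 0; i < len(bsd); {
		n := int(bsd[i])
		names = append(names, string(bsd[i+1:i+1+n]))
		i += 1 + n
	}
	fmt.Println(names) // [user.a user.b]

	// Darwin/Linux wire format: {name}\x00{name}\x00...
	nul := []byte("user.a\x00user.b\x00")
	names = names[:0]
	start := 0
	for i, b := range nul {
		if b == 0 {
			names = append(names, string(nul[start:i]))
			start = i + 1
		}
	}
	fmt.Println(names) // [user.a user.b]
}
```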
-func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go deleted file mode 100644 index 0c6585de9..000000000 --- a/vendor/github.com/pkg/xattr/xattr_linux.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build linux - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - XATTR_CREATE = unix.XATTR_CREATE - XATTR_REPLACE = unix.XATTR_REPLACE - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENODATA -) - -func getxattr(path string, name string, data []byte) (int, error) { - return unix.Getxattr(path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return unix.Lgetxattr(path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return unix.Fgetxattr(int(f.Fd()), name, data) -} - -func setxattr(path string, name string, data []byte, flags int) error { - return unix.Setxattr(path, name, data, flags) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return unix.Lsetxattr(path, name, data, flags) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return unix.Fsetxattr(int(f.Fd()), name, data, flags) -} - -func removexattr(path string, name string) error { - return unix.Removexattr(path, name) -} - -func lremovexattr(path string, name string) error { - return unix.Lremovexattr(path, name) -} - -func fremovexattr(f *os.File, name string) error { - return unix.Fremovexattr(int(f.Fd()), name) -} - -func listxattr(path string, data []byte) (int, error) { - return unix.Listxattr(path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return unix.Llistxattr(path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return unix.Flistxattr(int(f.Fd()), data) -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. 
-func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go deleted file mode 100644 index 305aefd7e..000000000 --- a/vendor/github.com/pkg/xattr/xattr_unsupported.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !linux,!freebsd,!netbsd,!darwin - -package xattr - -import ( - "os" -) - -func getxattr(path string, name string, data []byte) (int, error) { - return 0, nil -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return 0, nil -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return 0, nil -} - -func setxattr(path string, name string, data []byte, flags int) error { - return nil -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return nil -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return nil -} - -func removexattr(path string, name string) error { - return nil -} - -func lremovexattr(path string, name string) error { - return nil -} - -func fremovexattr(f *os.File, name string) error { - return nil -} - -func listxattr(path string, data []byte) (int, error) { - return 0, nil -} - -func llistxattr(path string, data []byte) (int, error) { - return 0, nil -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return 0, nil -} - -// dummy -func stringsFromByteSlice(buf []byte) (result []string) { - return []string{} -} diff --git a/vendor/github.com/restic/chunker/.travis.yml b/vendor/github.com/restic/chunker/.travis.yml deleted file mode 100644 index 71fc89384..000000000 --- a/vendor/github.com/restic/chunker/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - os: linux - go: "1.9.x" - - os: linux - go: "1.10.x" - - os: linux - go: "tip" - - os: osx - go: "1.10.x" - -install: - - go get -t ./... - - go get -u golang.org/x/lint/golint - - go get -u golang.org/x/tools/cmd/goimports - -script: - - go vet ./... - - go test -v -cpu=2 ./... - - go test -v -cpu=1,2,4 -short -race ./... - - diff -au <(goimports -d .) <(printf "") - - diff -au <(golint ./...) <(printf "") diff --git a/vendor/github.com/restic/chunker/LICENSE b/vendor/github.com/restic/chunker/LICENSE deleted file mode 100644 index 04f854350..000000000 --- a/vendor/github.com/restic/chunker/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2014, Alexander Neumann -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/restic/chunker/README.md b/vendor/github.com/restic/chunker/README.md deleted file mode 100644 index ae84c5497..000000000 --- a/vendor/github.com/restic/chunker/README.md +++ /dev/null @@ -1,12 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/restic/chunker?status.svg)](http://godoc.org/github.com/restic/chunker) -[![Build Status](https://travis-ci.com/restic/chunker.svg?branch=master)](https://travis-ci.com/restic/chunker) - -The package `chunker` implements content-defined-chunking (CDC) based on a -rolling Rabin Hash. The library is part of the [restic backup -program](https://github.com/restic/restic). - -An introduction to Content Defined Chunking can be found in the restic blog -post [Foundation - Introducing Content Defined Chunking (CDC)](https://restic.github.io/blog/2015-09-12/restic-foundation1-cdc). - -You can find the API documentation at -https://godoc.org/github.com/restic/chunker diff --git a/vendor/github.com/restic/chunker/chunker.go b/vendor/github.com/restic/chunker/chunker.go deleted file mode 100644 index 593be8f18..000000000 --- a/vendor/github.com/restic/chunker/chunker.go +++ /dev/null @@ -1,378 +0,0 @@ -package chunker - -import ( - "errors" - "io" - "sync" -) - -const ( - kiB = 1024 - miB = 1024 * kiB - - // WindowSize is the size of the sliding window. - windowSize = 64 - - // MinSize is the default minimal size of a chunk. - MinSize = 512 * kiB - // MaxSize is the default maximal size of a chunk. - MaxSize = 8 * miB - - chunkerBufSize = 512 * kiB -) - -type tables struct { - out [256]Pol - mod [256]Pol -} - -// cache precomputed tables, these are read-only anyway -var cache struct { - entries map[Pol]tables - sync.Mutex -} - -func init() { - cache.entries = make(map[Pol]tables) -} - -// Chunk is one content-dependent chunk of bytes whose end was cut when the -// Rabin Fingerprint had the value stored in Cut. -type Chunk struct { - Start uint - Length uint - Cut uint64 - Data []byte -} - -type chunkerState struct { - window [windowSize]byte - wpos uint - - buf []byte - bpos uint - bmax uint - - start uint - count uint - pos uint - - pre uint // wait for this many bytes before start calculating an new chunk - - digest uint64 -} - -type chunkerConfig struct { - MinSize, MaxSize uint - - pol Pol - polShift uint - tables tables - tablesInitialized bool - splitmask uint64 - - rd io.Reader - closed bool -} - -// Chunker splits content with Rabin Fingerprints. -type Chunker struct { - chunkerConfig - chunkerState -} - -// SetAverageBits allows to control the frequency of chunk discovery: -// the lower averageBits, the higher amount of chunks will be identified. -// The default value is 20 bits, so chunks will be of 1MiB size on average. -func (c *Chunker) SetAverageBits(averageBits int) { - c.splitmask = (1 << uint64(averageBits)) - 1 -} - -// New returns a new Chunker based on polynomial p that reads from rd. 
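Putting the constructor comments above together, a typical caller looks roughly like the following hypothetical sketch. It assumes the public API shown in this package (`RandomPolynomial` is defined later in polynomials.go; `New` and `Next` in this file) and the default 512 KiB to 8 MiB boundaries with a roughly 1 MiB average chunk size.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"github.com/restic/chunker"
)

func main() {
	// 16 MiB of random data stands in for a real file.
	data := make([]byte, 16*1024*1024)
	if _, err := io.ReadFull(rand.Reader, data); err != nil {
		panic(err)
	}

	pol, err := chunker.RandomPolynomial() // irreducible polynomial of degree 53
	if err != nil {
		panic(err)
	}

	c := chunker.New(bytes.NewReader(data), pol)
	buf := make([]byte, chunker.MaxSize) // reusable buffer passed to Next

	for {
		chunk, err := c.Next(buf)
		if err == io.EOF {
			break // all chunks returned
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk at %d, length %d\n", chunk.Start, chunk.Length)
	}
}
```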
-func New(rd io.Reader, pol Pol) *Chunker { - return NewWithBoundaries(rd, pol, MinSize, MaxSize) -} - -// NewWithBoundaries returns a new Chunker based on polynomial p that reads from -// rd and custom min and max size boundaries. -func NewWithBoundaries(rd io.Reader, pol Pol, min, max uint) *Chunker { - c := &Chunker{ - chunkerState: chunkerState{ - buf: make([]byte, chunkerBufSize), - }, - chunkerConfig: chunkerConfig{ - pol: pol, - rd: rd, - MinSize: min, - MaxSize: max, - splitmask: (1 << 20) - 1, // aim to create chunks of 20 bits or about 1MiB on average. - }, - } - - c.reset() - - return c -} - -// Reset reinitializes the chunker with a new reader and polynomial. -func (c *Chunker) Reset(rd io.Reader, pol Pol) { - c.ResetWithBoundaries(rd, pol, MinSize, MaxSize) -} - -// ResetWithBoundaries reinitializes the chunker with a new reader, polynomial -// and custom min and max size boundaries. -func (c *Chunker) ResetWithBoundaries(rd io.Reader, pol Pol, min, max uint) { - *c = Chunker{ - chunkerState: chunkerState{ - buf: c.buf, - }, - chunkerConfig: chunkerConfig{ - pol: pol, - rd: rd, - MinSize: min, - MaxSize: max, - splitmask: (1 << 20) - 1, - }, - } - - c.reset() -} - -func (c *Chunker) reset() { - c.polShift = uint(c.pol.Deg() - 8) - c.fillTables() - - for i := 0; i < windowSize; i++ { - c.window[i] = 0 - } - - c.closed = false - c.digest = 0 - c.wpos = 0 - c.count = 0 - c.digest = c.slide(c.digest, 1) - c.start = c.pos - - // do not start a new chunk unless at least MinSize bytes have been read - c.pre = c.MinSize - windowSize -} - -// fillTables calculates out_table and mod_table for optimization. This -// implementation uses a cache in the global variable cache. -func (c *Chunker) fillTables() { - // if polynomial hasn't been specified, do not compute anything for now - if c.pol == 0 { - return - } - - c.tablesInitialized = true - - // test if the tables are cached for this polynomial - cache.Lock() - defer cache.Unlock() - if t, ok := cache.entries[c.pol]; ok { - c.tables = t - return - } - - // calculate table for sliding out bytes. The byte to slide out is used as - // the index for the table, the value contains the following: - // out_table[b] = Hash(b || 0 || ... || 0) - // \ windowsize-1 zero bytes / - // To slide out byte b_0 for window size w with known hash - // H := H(b_0 || ... || b_w), it is sufficient to add out_table[b_0]: - // H(b_0 || ... || b_w) + H(b_0 || 0 || ... || 0) - // = H(b_0 + b_0 || b_1 + 0 || ... || b_w + 0) - // = H( 0 || b_1 || ... || b_w) - // - // Afterwards a new byte can be shifted in. - for b := 0; b < 256; b++ { - var h Pol - - h = appendByte(h, byte(b), c.pol) - for i := 0; i < windowSize-1; i++ { - h = appendByte(h, 0, c.pol) - } - c.tables.out[b] = h - } - - // calculate table for reduction mod Polynomial - k := c.pol.Deg() - for b := 0; b < 256; b++ { - // mod_table[b] = A | B, where A = (b(x) * x^k mod pol) and B = b(x) * x^k - // - // The 8 bits above deg(Polynomial) determine what happens next and so - // these bits are used as a lookup to this table. 
The value is split in - // two parts: Part A contains the result of the modulus operation, part - // B is used to cancel out the 8 top bits so that one XOR operation is - // enough to reduce modulo Polynomial - c.tables.mod[b] = Pol(uint64(b)<> polShift' - if polShift > 53-8 { - return Chunk{}, errors.New("the polynomial must have a degree less than or equal 53") - } - minSize := c.MinSize - maxSize := c.MaxSize - buf := c.buf - for { - if c.bpos >= c.bmax { - n, err := io.ReadFull(c.rd, buf[:]) - - if err == io.ErrUnexpectedEOF { - err = nil - } - - // io.ReadFull only returns io.EOF when no bytes could be read. If - // this is the case and we're in this branch, there are no more - // bytes to buffer, so this was the last chunk. If a different - // error has occurred, return that error and abandon the current - // chunk. - if err == io.EOF && !c.closed { - c.closed = true - - // return current chunk, if any bytes have been processed - if c.count > 0 { - return Chunk{ - Start: c.start, - Length: c.count, - Cut: c.digest, - Data: data, - }, nil - } - } - - if err != nil { - return Chunk{}, err - } - - c.bpos = 0 - c.bmax = uint(n) - } - - // check if bytes have to be dismissed before starting a new chunk - if c.pre > 0 { - n := c.bmax - c.bpos - if c.pre > uint(n) { - c.pre -= uint(n) - data = append(data, buf[c.bpos:c.bmax]...) - - c.count += uint(n) - c.pos += uint(n) - c.bpos = c.bmax - - continue - } - - data = append(data, buf[c.bpos:c.bpos+c.pre]...) - - c.bpos += c.pre - c.count += c.pre - c.pos += c.pre - c.pre = 0 - } - - add := c.count - digest := c.digest - win := c.window - wpos := c.wpos - for _, b := range buf[c.bpos:c.bmax] { - // slide(b) - // limit wpos before to elide array bound checks - wpos = wpos % windowSize - out := win[wpos] - win[wpos] = b - digest ^= uint64(tabout[out]) - wpos++ - - // updateDigest - index := byte(digest >> polShift) - digest <<= 8 - digest |= uint64(b) - - digest ^= uint64(tabmod[index]) - // end manual inline - - add++ - if add < minSize { - continue - } - - if (digest&c.splitmask) == 0 || add >= maxSize { - i := add - c.count - 1 - data = append(data, c.buf[c.bpos:c.bpos+uint(i)+1]...) - c.count = add - c.pos += uint(i) + 1 - c.bpos += uint(i) + 1 - c.buf = buf - - chunk := Chunk{ - Start: c.start, - Length: c.count, - Cut: digest, - Data: data, - } - - c.reset() - - return chunk, nil - } - } - c.digest = digest - c.window = win - c.wpos = wpos % windowSize - - steps := c.bmax - c.bpos - if steps > 0 { - data = append(data, c.buf[c.bpos:c.bpos+steps]...) - } - c.count += steps - c.pos += steps - c.bpos = c.bmax - } -} - -func updateDigest(digest uint64, polShift uint, tab tables, b byte) (newDigest uint64) { - index := digest >> polShift - digest <<= 8 - digest |= uint64(b) - - digest ^= uint64(tab.mod[index]) - return digest -} - -func (c *Chunker) slide(digest uint64, b byte) (newDigest uint64) { - out := c.window[c.wpos] - c.window[c.wpos] = b - digest ^= uint64(c.tables.out[out]) - c.wpos = (c.wpos + 1) % windowSize - - digest = updateDigest(digest, c.polShift, c.tables, b) - return digest -} - -func appendByte(hash Pol, b byte, pol Pol) Pol { - hash <<= 8 - hash |= Pol(b) - - return hash.Mod(pol) -} diff --git a/vendor/github.com/restic/chunker/doc.go b/vendor/github.com/restic/chunker/doc.go deleted file mode 100644 index 5537c172c..000000000 --- a/vendor/github.com/restic/chunker/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2014 Alexander Neumann. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package chunker implements Content Defined Chunking (CDC) based on a rolling -Rabin Checksum. - -Choosing a Random Irreducible Polynomial - -The function RandomPolynomial() returns a new random polynomial of degree 53 -for use with the chunker. The degree 53 is chosen because it is the largest -prime below 64-8 = 56, so that the top 8 bits of a uint64 can be used for -optimising calculations in the chunker. - -A random polynomial is chosen by selecting 64 random bits, masking away bits -64..54 and setting bit 53 to one (otherwise the polynomial is not of the -desired degree) and bit 0 to one (otherwise the polynomial is trivially -reducible), so that 51 bits are chosen at random. - -This process is repeated until Irreducible() returns true, then this -polynomial is returned. If this doesn't happen after 1 million tries, the -function returns an error. The probability for selecting an irreducible -polynomial at random is about 7.5% ( (2^53-2)/53 / 2^51), so the probability -that no irreducible polynomial has been found after 100 tries is lower than -0.04%. - -Verifying Irreducible Polynomials - -During development the results have been verified using the computational -discrete algebra system GAP, which can be obtained from the website at -http://www.gap-system.org/. - -For filtering a given list of polynomials in hexadecimal coefficient notation, -the following script can be used: - - # create x over F_2 = GF(2) - x := Indeterminate(GF(2), "x"); - - # test if polynomial is irreducible, i.e. the number of factors is one - IrredPoly := function (poly) - return (Length(Factors(poly)) = 1); - end;; - - # create a polynomial in x from the hexadecimal representation of the - # coefficients - Hex2Poly := function (s) - return ValuePol(CoefficientsQadic(IntHexString(s), 2), x); - end;; - - # list of candidates, in hex - candidates := [ "3DA3358B4DC173" ]; - - # create real polynomials - L := List(candidates, Hex2Poly); - - # filter and display the list of irreducible polynomials contained in L - Display(Filtered(L, x -> (IrredPoly(x)))); - -All irreducible polynomials from the list are written to the output. - -Background Literature - -An introduction to Rabin Fingerprints/Checksums can be found in the following articles: - -Michael O. Rabin (1981): "Fingerprinting by Random Polynomials" -http://www.xmailserver.org/rabin.pdf - -Ross N. Williams (1993): "A Painless Guide to CRC Error Detection Algorithms" -http://www.zlib.net/crc_v3.txt - -Andrei Z.
Broder (1993): "Some Applications of Rabin's Fingerprinting Method" -http://www.xmailserver.org/rabin_apps.pdf - -Shuhong Gao and Daniel Panario (1997): "Tests and Constructions of Irreducible Polynomials over Finite Fields" -http://www.math.clemson.edu/~sgao/papers/GP97a.pdf - -Andrew Kadatch, Bob Jenkins (2007): "Everything we know about CRC but afraid to forget" -http://crcutil.googlecode.com/files/crc-doc.1.0.pdf - -*/ -package chunker diff --git a/vendor/github.com/restic/chunker/go.mod b/vendor/github.com/restic/chunker/go.mod deleted file mode 100644 index b595a6c94..000000000 --- a/vendor/github.com/restic/chunker/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/restic/chunker diff --git a/vendor/github.com/restic/chunker/polynomials.go b/vendor/github.com/restic/chunker/polynomials.go deleted file mode 100644 index 8b126296f..000000000 --- a/vendor/github.com/restic/chunker/polynomials.go +++ /dev/null @@ -1,310 +0,0 @@ -package chunker - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "strconv" -) - -// Pol is a polynomial from F_2[X]. -type Pol uint64 - -// Add returns x+y. -func (x Pol) Add(y Pol) Pol { - r := Pol(uint64(x) ^ uint64(y)) - return r -} - -// mulOverflows returns true if the multiplication would overflow uint64. -// Code by Rob Pike, see -// https://groups.google.com/d/msg/golang-nuts/h5oSN5t3Au4/KaNQREhZh0QJ -func mulOverflows(a, b Pol) bool { - if a <= 1 || b <= 1 { - return false - } - c := a.mul(b) - d := c.Div(b) - if d != a { - return true - } - - return false -} - -func (x Pol) mul(y Pol) Pol { - if x == 0 || y == 0 { - return 0 - } - - var res Pol - for i := 0; i <= y.Deg(); i++ { - if (y & (1 << uint(i))) > 0 { - res = res.Add(x << uint(i)) - } - } - - return res -} - -// Mul returns x*y. When an overflow occurs, Mul panics. -func (x Pol) Mul(y Pol) Pol { - if mulOverflows(x, y) { - panic("multiplication would overflow uint64") - } - - return x.mul(y) -} - -// Deg returns the degree of the polynomial x. If x is zero, -1 is returned. -func (x Pol) Deg() int { - // the degree of 0 is -1 - if x == 0 { - return -1 - } - - // see https://graphics.stanford.edu/~seander/bithacks.html#IntegerLog - - r := 0 - if uint64(x)&0xffffffff00000000 > 0 { - x >>= 32 - r |= 32 - } - - if uint64(x)&0xffff0000 > 0 { - x >>= 16 - r |= 16 - } - - if uint64(x)&0xff00 > 0 { - x >>= 8 - r |= 8 - } - - if uint64(x)&0xf0 > 0 { - x >>= 4 - r |= 4 - } - - if uint64(x)&0xc > 0 { - x >>= 2 - r |= 2 - } - - if uint64(x)&0x2 > 0 { - r |= 1 - } - - return r -} - -// String returns the coefficients in hex. -func (x Pol) String() string { - return "0x" + strconv.FormatUint(uint64(x), 16) -} - -// Expand returns the string representation of the polynomial x. -func (x Pol) Expand() string { - if x == 0 { - return "0" - } - - s := "" - for i := x.Deg(); i > 1; i-- { - if x&(1< 0 { - s += fmt.Sprintf("+x^%d", i) - } - } - - if x&2 > 0 { - s += "+x" - } - - if x&1 > 0 { - s += "+1" - } - - return s[1:] -} - -// DivMod returns x / d = q, and remainder r, -// see https://en.wikipedia.org/wiki/Division_algorithm -func (x Pol) DivMod(d Pol) (Pol, Pol) { - if x == 0 { - return 0, 0 - } - - if d == 0 { - panic("division by zero") - } - - D := d.Deg() - diff := x.Deg() - D - if diff < 0 { - return 0, x - } - - var q Pol - for diff >= 0 { - m := d << uint(diff) - q |= (1 << uint(diff)) - x = x.Add(m) - - diff = x.Deg() - D - } - - return q, x -} - -// Div returns the integer division result x / d. 
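A short worked example of the carry-less arithmetic defined above (all coefficients in F_2, one bit per coefficient), using only methods shown in this file; this is a sketch, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/restic/chunker"
)

func main() {
	a := chunker.Pol(0xB) // x^3+x+1
	b := chunker.Pol(0x3) // x+1

	// Carry-less multiply: the x+x cross terms cancel, since addition is XOR.
	p := a.Mul(b)
	fmt.Println(p, p.Expand()) // 0x1d x^4+x^3+x^2+1

	// Long division recovers the factor with zero remainder.
	q, r := p.DivMod(a)
	fmt.Println(q.Expand(), r) // x+1 0x0
}
```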
- -// Div returns the integer division result x / d. -func (x Pol) Div(d Pol) Pol { - q, _ := x.DivMod(d) - return q -} - -// Mod returns the remainder of x / d. -func (x Pol) Mod(d Pol) Pol { - _, r := x.DivMod(d) - return r -} - -// I really dislike having a function that does not terminate, so specify a -// really large upper bound for finding a new irreducible polynomial, and -// return an error when no irreducible polynomial has been found within -// randPolMaxTries. -const randPolMaxTries = 1e6 - -// RandomPolynomial returns a new random irreducible polynomial -// of degree 53 using the default system CSPRNG as its source. -// It is equivalent to calling DerivePolynomial(rand.Reader). -func RandomPolynomial() (Pol, error) { - return DerivePolynomial(rand.Reader) -} - -// DerivePolynomial returns an irreducible polynomial of degree 53 -// (the largest prime below 64-8) by reading bytes from source. -// There are ((2^53-2)/53) irreducible polynomials of degree 53 in -// F_2[X], c.f. Michael O. Rabin (1981): "Fingerprinting by Random -// Polynomials", page 4. If no polynomial could be found in one -// million tries, an error is returned. -func DerivePolynomial(source io.Reader) (Pol, error) { - for i := 0; i < randPolMaxTries; i++ { - var f Pol - - // choose polynomial at (pseudo)random - err := binary.Read(source, binary.LittleEndian, &f) - if err != nil { - return 0, err - } - - // mask away bits above bit 53 - f &= Pol((1 << 54) - 1) - - // set highest and lowest bit so that the degree is 53 and the - // polynomial is not trivially reducible - f |= (1 << 53) | 1 - - // test if f is irreducible - if f.Irreducible() { - return f, nil - } - } - - // If this is reached, we haven't found an irreducible polynomial in - // randPolMaxTries. This error is very unlikely to occur. - return 0, errors.New("unable to find new random irreducible polynomial") -} - -// GCD computes the Greatest Common Divisor of x and f. -func (x Pol) GCD(f Pol) Pol { - if f == 0 { - return x - } - - if x == 0 { - return f - } - - if x.Deg() < f.Deg() { - x, f = f, x - } - - return f.GCD(x.Mod(f)) -} - -// Irreducible returns true iff x is irreducible over F_2. This function -// uses Ben-Or's reducibility test. -// -// For details see "Tests and Constructions of Irreducible Polynomials over -// Finite Fields". -func (x Pol) Irreducible() bool { - for i := 1; i <= x.Deg()/2; i++ { - if x.GCD(qp(uint(i), x)) != 1 { - return false - } - } - - return true -} - -// MulMod computes x*f mod g. -func (x Pol) MulMod(f, g Pol) Pol { - if x == 0 || f == 0 { - return 0 - } - - var res Pol - for i := 0; i <= f.Deg(); i++ { - if (f & (1 << uint(i))) > 0 { - a := x - for j := 0; j < i; j++ { - a = a.Mul(2).Mod(g) - } - res = res.Add(a).Mod(g) - } - } - - return res -} - -// qp computes the polynomial (x^(2^p)-x) mod g. This is needed for the -// reducibility test. -func qp(p uint, g Pol) Pol { - num := (1 << p) - i := 1 - - // start with x - res := Pol(2) - - for i < num { - // repeatedly square res - res = res.MulMod(res, g) - i *= 2 - } - - // add x - return res.Add(2).Mod(g) -} - -// MarshalJSON returns the JSON representation of the Pol. -func (x Pol) MarshalJSON() ([]byte, error) { - buf := strconv.AppendUint([]byte{'"'}, uint64(x), 16) - buf = append(buf, '"') - return buf, nil -} - -// UnmarshalJSON parses a Pol from the JSON data.
-func (x *Pol) UnmarshalJSON(data []byte) error { - if len(data) < 2 { - return errors.New("invalid string for polynomial") - } - n, err := strconv.ParseUint(string(data[1:len(data)-1]), 16, 64) - if err != nil { - return err - } - *x = Pol(n) - - return nil -} diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore deleted file mode 100644 index 75623dccc..000000000 --- a/vendor/github.com/russross/blackfriday/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml deleted file mode 100644 index 2f3351d7a..000000000 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.9.x" - - "1.10.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt deleted file mode 100644 index 2885af360..000000000 --- a/vendor/github.com/russross/blackfriday/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md deleted file mode 100644 index 3c62e1375..000000000 --- a/vendor/github.com/russross/blackfriday/README.md +++ /dev/null @@ -1,369 +0,0 @@ -Blackfriday -[![Build Status][BuildSVG]][BuildURL] -[![Godoc][GodocV2SVG]][GodocV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. 
- -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with any modern Go release. With Go and git installed: - - go get -u gopkg.in/russross/blackfriday.v2 - -will download, compile, and install the package into your `$GOPATH` directory -hierarchy. - - -Versions --------- - -The currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. - -It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using a package management tool like [dep][7] or -[Glide][8] and making use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford to modify your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://godoc.org/github.com/russross/blackfriday - -### Known issue with `dep` - -There is a known problem with using Blackfriday v1 _transitively_ and `dep`. -Currently `dep` prioritizes semver versions over anything else, and picks the -latest one, plus it does not apply a `[[constraint]]` specifier to transitively -pulled in packages. So if you're using something that uses Blackfriday v1, but -that something does not use `dep` yet, you will get Blackfriday v2 pulled in and -your first dependency will fail to build. - -There are a couple of fixes for it, documented here: -https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version - -Meanwhile, the `dep` team is working on a more general solution to the constraints -on transitive dependencies problem: https://github.com/golang/dep/issues/1124. - - -Usage ------ - -### v1 - -For basic usage, it is as simple as getting your input into a byte -slice and calling: - - output := blackfriday.MarkdownBasic(input) - -This renders it with no extensions enabled. To get a more useful -feature set, use this instead: - - output := blackfriday.MarkdownCommon(input) - -### v2 - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of the most popular -extensions enabled. If you want the most basic feature set, corresponding to -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -```
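To pick the extension set explicitly instead, the same call accepts options; a sketch assuming the v2 option API described under "Custom options, v2" below (`CommonExtensions` is the bundle that plain `Run` enables by default):

```go
output := blackfriday.Run(input, blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.Footnotes))
```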
- -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through an HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "gopkg.in/russross/blackfriday.v2" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options, v1 - -If you want to customize the set of options, first get a renderer -(currently only the HTML output engine), then use it to -call the more general `Markdown` function. For examples, see the -implementations of `MarkdownBasic` and `MarkdownCommon` in -`markdown.go`. - -### Custom options, v2 - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* <https://github.com/russross/blackfriday-tool> - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. - -[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need the full functionality of blackfriday. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening.
The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). Just - mark it like this: - - ``` go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - - To preserve classes of fenced code blocks while using the bluemonday - HTML sanitizer, use the following policy: - - ``` go - p := bluemonday.UGCPolicy() - p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") - html := p.SanitizeBytes(unsafe) - ``` - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. - - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. With this extension enabled (it is off by - default in the `MarkdownBasic` and `MarkdownCommon` convenience - functions), newlines in the input translate into line breaks in - the output. - -* **Smart quotes**. Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. 
- -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `–`, and `---` is translated into - `—`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `45`, which renders as - 45. - - -Other renderers ---------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): - renders output as LaTeX. - -* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience - integration with the [Chroma](https://github.com/alecthomas/chroma) code - highlighting library. bfchroma is only compatible with v2 of Blackfriday and - provides a drop-in renderer ready to use with Blackfriday, as well as - options and means for further customization. - - -TODO ----- - -* More unit testing -* Improve Unicode support. It does not understand all Unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all UTF-8 input. - - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" - [7]: https://github.com/golang/dep/ "dep" - [8]: https://github.com/Masterminds/glide "Glide" - - [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master - [BuildURL]: https://travis-ci.org/russross/blackfriday - [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg - [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go deleted file mode 100644 index 45c21a6c2..000000000 --- a/vendor/github.com/russross/blackfriday/block.go +++ /dev/null @@ -1,1474 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "strings" - "unicode" -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. 
-func (p *parser) block(out *bytes.Buffer, data []byte) { - if len(data) == 0 || data[len(data)-1] != '\n' { - panic("block input is missing terminating newline") - } - - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed header: - // - // # Header 1 - // ## Header 2 - // ... - // ###### Header 6 - if p.isPrefixHeader(data) { - data = data[p.prefixHeader(out, data):] - continue - } - - // block of preformatted HTML: - // - //
<div> - //     ... - // </div>
- if data[0] == '<' { - if i := p.html(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.flags&EXTENSION_TITLEBLOCK != 0 { - if data[0] == '%' { - if i := p.titleBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(out, data):] - continue - } - - // fenced code block: - // - // ``` go info string here - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCodeBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.r.HRule(out) - var i int - for i = 0; data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(out, data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.flags&EXTENSION_TABLES != 0 { - if i := p.table(out, data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(out, data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. 
Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_ORDERED):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_DEFINITION):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headers, too - data = data[p.paragraph(out, data):] - } - - p.nesting-- -} - -func (p *parser) isPrefixHeader(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.flags&EXTENSION_SPACE_HEADERS != 0 { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - if data[level] != ' ' { - return false - } - } - return true -} - -func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.flags&EXTENSION_HEADER_IDS != 0 { - j, k := 0, 0 - // find start/end of header id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract header id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - } - if end > i { - if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[i:end])) - } - work := func() bool { - p.inline(out, data[i:end]) - return true - } - p.r.Header(out, work, level, id) - } - return skip -} - -func (p *parser) isUnderlinedHeader(data []byte) int { - // test of level 1 header - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if data[i] == '\n' { - return 1 - } else { - return 0 - } - } - - // test of level 2 header - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if data[i] == '\n' { - return 2 - } else { - return 0 - } - } - - return 0 -} - -func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - p.r.TitleBlock(out, data) - - return len(data) -} - -func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(out, data, doRender); size > 0 { - return size - } - - // check for an
<hr>
tag - if size := p.htmlHr(out, data, doRender); size > 0 { - return size - } - - // check for HTML CDATA - if size := p.htmlCDATA(out, data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - - return i -} - -func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { - // html block needs to end with a blank line - if i := p.isEmpty(data[start:]); i > 0 { - size := start + i - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - return size - } - return 0 -} - -// HTML comment, lax form -func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { - i := p.inlineHTMLComment(out, data) - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HTML CDATA section -func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { - const cdataTag = "') { - i++ - } - i++ - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HR, which is the only self-closing block tag considered -func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
<hr>
tag after all; at least not a valid one - return 0 - } - - i := 3 - for data[i] != '>' && data[i] != '\n' { - i++ - } - - if data[i] == '>' { - return p.renderHTMLBlock(out, data, i+1, doRender) - } - - return 0 -} - -func (p *parser) htmlFindTag(data []byte) (string, bool) { - i := 0 - for isalnum(data[i]) { - i++ - } - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *parser) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - - // check if tag is a match - closetag := []byte("") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - -func (*parser) isEmpty(data []byte) int { - // it is okay to call isEmpty on an empty buffer - if len(data) == 0 { - return 0 - } - - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] != ' ' && data[i] != '\t' { - return 0 - } - } - return i + 1 -} - -func (*parser) isHRule(data []byte) bool { - i := 0 - - // skip up to three spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // look at the hrule char - if data[i] != '*' && data[i] != '-' && data[i] != '_' { - return false - } - c := data[i] - - // the whole line must be the char or whitespace - n := 0 - for data[i] != '\n' { - switch { - case data[i] == c: - n++ - case data[i] != ' ': - return false - } - i++ - } - - return n >= 3 -} - -// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, -// and returns the end index if so, or 0 otherwise. It also returns the marker found. -// If syntax is not nil, it gets set to the syntax specified in the fence line. -// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. -func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { - i, size := 0, 0 - - // skip up to three spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ~ or ` - if i >= len(data) { - return 0, "" - } - if data[i] != '~' && data[i] != '`' { - return 0, "" - } - - c := data[i] - - // the whole line must be the same char or whitespace - for i < len(data) && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here - // into one, always get the info string, and discard it if the caller doesn't care. 
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if newlineOptional && i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - - i++ - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if newlineOptional && i == len(data) { - return i, marker - } - return 0, "" - } - - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { - var infoString string - beg, marker := isFenceLine(data, &infoString, "", false) - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - newlineOptional := !doRender - fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - p.r.BlockCode(out, work.Bytes(), infoString) - } - - return beg -} - -func (p *parser) table(out *bytes.Buffer, data []byte) int { - var header bytes.Buffer - i, columns := p.tableHeader(&header, data) - if i == 0 { - return 0 - } - - var body bytes.Buffer - - for i < len(data) { - pipes, rowStart := 0, i - for ; data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - i++ - p.tableRow(&body, data[rowStart:i], columns, false) - } - - p.r.Table(out, header.Bytes(), body.Bytes(), columns) - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { - i := 0 - colCount := 1 - for i = 0; data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - header := data[:i+1] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]int, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_LEFT - dashes++ - } - for data[i] == '-' { - i++ - dashes++ - } - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_RIGHT - dashes++ - } - for data[i] == ' ' { - i++ - } - - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.tableRow(out, header, columns, true) - size = i + 1 - return -} - -func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { - i, col := 0, 0 - var rowWork bytes.Buffer - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for data[i] == ' ' { - i++ - } - - cellStart := i - - for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && data[cellEnd-1] == ' ' { - cellEnd-- - } - - var cellWork bytes.Buffer - p.inline(&cellWork, data[cellStart:cellEnd]) - - if header { - p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) - } else { - p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) - } - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - if header { - p.r.TableHeaderCell(&rowWork, nil, columns[col]) - } else { - p.r.TableCell(&rowWork, nil, columns[col]) - } - } - - // silently ignore rows with too many cells - - p.r.TableRow(out, rowWork.Bytes()) -} - -// returns blockquote prefix length -func (p *parser) quotePrefix(data []byte) int { - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - if data[i] == '>' { - if data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *parser) quote(out *bytes.Buffer, data []byte) int { - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for data[end] != '\n' { - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - end++ - - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - - var cooked bytes.Buffer - p.block(&cooked, raw.Bytes()) - p.r.BlockQuote(out, cooked.Bytes()) - return end -} - -// returns prefix length for block code -func (p *parser) codePrefix(data []byte) int { - if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *parser) code(out *bytes.Buffer, data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for data[i] != '\n' { - i++ - } - i++ - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffeu - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - p.r.BlockCode(out, work.Bytes(), "") - - return i -} - -// returns unordered list item prefix -func (p *parser) uliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // need a *, +, or - followed by a space - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - data[i+1] != ' ' { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *parser) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for data[i] >= '0' && data[i] <= '9' { - i++ - } - - // we need >= 1 digits followed by a dot and a space - if start == i || data[i] != '.' || data[i+1] != ' ' { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *parser) dliPrefix(data []byte) int { - i := 0 - - // need a : followed by a spaces - if data[i] != ':' || data[i+1] != ' ' { - return 0 - } - for data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { - i := 0 - flags |= LIST_ITEM_BEGINNING_OF_LIST - work := func() bool { - for i < len(data) { - skip := p.listItem(out, data[i:], &flags) - i += skip - - if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { - break - } - flags &= ^LIST_ITEM_BEGINNING_OF_LIST - } - return true - } - - p.r.List(out, work, flags) - return i -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
-func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { - // keep track of the indentation of the first line - itemIndent := 0 - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^LIST_TYPE_TERM - } - } - if i == 0 { - // if in defnition list, set term flag and continue - if *flags&LIST_TYPE_DEFINITION != 0 { - *flags |= LIST_TYPE_TERM - } else { - return 0 - } - } - - // skip leading whitespace on first line - for data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - raw.Write(data[line:i]) - line = i - continue - } - - // calculate the indentation - indent := 0 - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - } - - chunk := data[line+indent : i] - - if p.flags&EXTENSION_FENCED_CODE != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. - *flags |= LIST_ITEM_CONTAINS_BLOCK - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indent : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - if containsBlankLine { - // end the list if the type changed after a blank line - if indent <= itemIndent && - ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || - (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { - - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - // to be a nested list, it must be indented more - // if not, it is the next item in the same list - if indent <= itemIndent { - break gatherlines - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix header? - case p.isPrefixHeader(chunk): - // if the header is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= LIST_ITEM_END_OF_LIST - } - } else { - *flags |= LIST_ITEM_END_OF_LIST - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - containsBlankLine = false - - // add the line into the working buffer without prefix - raw.Write(data[line+indent : i]) - - line = i - } - - // If reached end of data, the Renderer.ListItem call we're going to make below - // is definitely the last in the list. - if line >= len(data) { - *flags |= LIST_ITEM_END_OF_LIST - } - - rawBytes := raw.Bytes() - - // render the contents of the list item - var cooked bytes.Buffer - if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.block(&cooked, rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - p.inline(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.inline(&cooked, rawBytes) - } - } - - // render the actual list item - cookedBytes := cooked.Bytes() - parsedEnd := len(cookedBytes) - - // strip trailing newlines - for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { - parsedEnd-- - } - p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) - - return line -} - -// render a single paragraph that has already been parsed out -func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - // trim trailing newline - end := len(data) - 1 - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - work := func() bool { - p.inline(out, data[beg:end]) - return true - } - p.r.Paragraph(out, work) -} - -func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - p.renderParagraph(out, data[:i]) - return i + n - } - - // an underline under some text marks a header, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeader(current); level > 0 { - // render the paragraph - p.renderParagraph(out, data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - // render the header - // this ugly double closure avoids forcing variables onto the heap - work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { - return func() bool { - pp.inline(o, d) - return true - } - }(out, p, data[prev:eol]) - - id := "" - if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[prev:eol])) - } - - p.r.Header(out, work, level, id) - - // find the end of the underline - for data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { - if data[i] == '<' && p.html(out, current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(out, data[:i]) - return i - } - } - - // if there's a prefixed header or a horizontal rule after this, paragraph is over - if p.isPrefixHeader(current) || p.isHRule(current) { - p.renderParagraph(out, data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.flags&EXTENSION_FENCED_CODE != 0 { - if p.fencedCodeBlock(out, current, false) > 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(current) != 0 { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - // if there's a list after this, paragraph is over - if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - for data[i] != '\n' { - i++ - } - i++ - } - - p.renderParagraph(out, data[:i]) - return i -} - -// SanitizedAnchorName returns a sanitized anchor name for the given text. -// -// It implements the algorithm specified in the package comment. -func SanitizedAnchorName(text string) string { - var anchorName []rune - futureDash := false - for _, r := range text { - switch { - case unicode.IsLetter(r) || unicode.IsNumber(r): - if futureDash && len(anchorName) > 0 { - anchorName = append(anchorName, '-') - } - futureDash = false - anchorName = append(anchorName, unicode.ToLower(r)) - default: - futureDash = true - } - } - return string(anchorName) -} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a1..000000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. -// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. 
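-// For example (an added illustration, not part of the original comment): -// SanitizedAnchorName("This is a header") returns "this-is-a-header".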
This algorithm is used to create -// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that precede the first valid character, -// as well as invalid characters that follow the last valid character, -// are dropped completely. All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. -// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need the full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod deleted file mode 100644 index b05561a06..000000000 --- a/vendor/github.com/russross/blackfriday/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go deleted file mode 100644 index e0a6c69c9..000000000 --- a/vendor/github.com/russross/blackfriday/html.go +++ /dev/null @@ -1,938 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross <russ@russross.com>. -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Html renderer configuration options. -const ( - HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks - HTML_SKIP_STYLE // skip embedded
<style> elements
[garbled in extraction: the remainder of html.go, the other vendored files deleted between it and golang.org/x/net, and most of the HTML template for the /debug/events page from vendor/golang.org/x/net/trace/events.go]
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286c7..000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index 3ebf6f2da..000000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1130 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides a histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "context" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "net/url" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// HTTP ServeMux paths. -const ( - debugRequestsPath = "/debug/requests" - debugEventsPath = "/debug/events" -) - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port.
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) - if pat == debugRequestsPath { - panic("/debug/requests is already registered. You may have two independent copies of " + - "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + - "involve a vendored copy of golang.org/x/net/trace.") - } - - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc(debugRequestsPath, Traces) - http.HandleFunc(debugEventsPath, Events) -} - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. 
- if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. 
- SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - elapsed := time.Now().Sub(tr.Start) - tr.mu.Lock() - tr.Elapsed = elapsed - tr.mu.Unlock() - - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - tr.mu.RLock() // protects tr fields in Cond.match calls - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - tr.mu.RUnlock() - - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. 
-} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. 
- b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. -func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Start time of this trace. - Start time.Time - - mu sync.RWMutex - events []event // Append-only sequence of events (modulo discards). - maxEvents int - recycler func(interface{}) - IsError bool // Whether this trace resulted in an error. - Elapsed time.Duration // Elapsed time for this trace, zero while active. - traceID uint64 // Trace information if non-zero.
- spanID uint64 - - refs int32 // how many buckets this is in - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - - tr.mu.Lock() - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.recycler = nil - tr.mu.Unlock() - - tr.refs = 0 - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { - tr.mu.Lock() - tr.IsError = true - tr.mu.Unlock() -} - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.mu.Lock() - tr.recycler = f - tr.mu.Unlock() -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.mu.Lock() - tr.traceID, tr.spanID = traceID, spanID - tr.mu.Unlock() -} - -func (tr *trace) SetMaxEvents(m int) { - tr.mu.Lock() - // Always keep at least three events: first, discarded count, last. 
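-	// The new limit takes effect only if no events have been recorded yet and m exceeds that three-event minimum.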
- if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } - tr.mu.Unlock() -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - tr.mu.RLock() - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - tr.mu.RUnlock() - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - tr.mu.RLock() - t := tr.Elapsed - tr.mu.RUnlock() - - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index fa139db22..000000000 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - tip - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index dfbed62cf..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0f443e693..000000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) - -The oauth2 package contains a client implementation for the OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - -## Policy for new packages - -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the oauth2 repository is located at -https://github.com/golang/oauth2/issues.
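The README defers to the godoc for usage; as a quick orientation, here is a minimal sketch of the standard three-legged flow. The client credentials, endpoint URLs, scope, and resource URL below are placeholders, and a production handler would also verify the state parameter:

	package main

	import (
		"context"
		"fmt"
		"log"

		"golang.org/x/oauth2"
	)

	func main() {
		ctx := context.Background()
		conf := &oauth2.Config{
			ClientID:     "CLIENT_ID",     // placeholder
			ClientSecret: "CLIENT_SECRET", // placeholder
			RedirectURL:  "https://example.com/callback",
			Scopes:       []string{"profile"},
			Endpoint: oauth2.Endpoint{
				AuthURL:  "https://provider.example.com/oauth2/auth",
				TokenURL: "https://provider.example.com/oauth2/token",
			},
		}

		// Send the user to the provider's consent page.
		fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

		// After the redirect, exchange the authorization code for a token.
		var code string
		if _, err := fmt.Scan(&code); err != nil {
			log.Fatal(err)
		}
		tok, err := conf.Exchange(ctx, code)
		if err != nil {
			log.Fatal(err)
		}

		// conf.Client attaches the token to requests and refreshes it as needed.
		client := conf.Client(ctx, tok)
		if _, err := client.Get("https://provider.example.com/api/resource"); err != nil {
			log.Fatal(err)
		}
	}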
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod deleted file mode 100644 index b34578155..000000000 --- a/vendor/golang.org/x/oauth2/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module golang.org/x/oauth2 - -go 1.11 - -require ( - cloud.google.com/go v0.34.0 - golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - google.golang.org/appengine v1.4.0 -) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum deleted file mode 100644 index 6f0079b0d..000000000 --- a/vendor/golang.org/x/oauth2/go.sum +++ /dev/null @@ -1,12 +0,0 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index feb1157b1..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "time" - - "golang.org/x/oauth2" -) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string - -// AppEngineTokenSource returns a token source that fetches tokens from either -// the current application's service account or from the metadata server, -// depending on the App Engine environment. See below for environment-specific -// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that -// involves user accounts, see oauth2.Config instead. -// -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the -// current App Engine application's service account. The provided context must have -// come from appengine.NewContext. -// -// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: -// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the -// flexible environment. 
It delegates to ComputeTokenSource, and the provided -// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, -// which DefaultTokenSource will use in this case) instead. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index 83dacac32..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 04c2c2216..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. - -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. 
Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index ad2c09236..000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" -) - -// Credentials holds Google credentials, including "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type Credentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - -// DefaultCredentials is the old name of Credentials. -// -// Deprecated: use Credentials instead. -type DefaultCredentials = Credentials - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource returns the token source for -// "Application Default Credentials". -// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - creds, err := FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - return creds.TokenSource, nil -} - -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scopes) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return creds, nil - } - - // Second, try a well-known file. 
- filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &DefaultCredentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, - // or App Engine flexible, use the metadata server. - if metadata.OnGCE() { - id, _ := metadata.ProjectID() - return &DefaultCredentials{ - ProjectID: id, - TokenSource: ComputeTokenSource("", scopes...), - }, nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) -} - -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { - var f credentialsFile - if err := json.Unmarshal(jsonData, &f); err != nil { - return nil, err - } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) - if err != nil { - return nil, err - } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, - }, nil -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return CredentialsFromJSON(ctx, b, scopes...) -} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go deleted file mode 100644 index 73be62903..000000000 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and authenticated -// HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. -// -// A brief overview of the package follows. For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -// -// OAuth2 Configs -// -// Two functions in this package return golang.org/x/oauth2.Config values from Google credential -// data. 
Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, -// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or -// create an http.Client. -// -// -// Credentials -// -// The Credentials type represents Google credentials, including Application Default -// Credentials. -// -// Use FindDefaultCredentials to obtain Application Default Credentials. -// FindDefaultCredentials looks in some well-known places for a credentials file, and -// will call AppEngineTokenSource or ComputeTokenSource as needed. -// -// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, -// then use the credentials to construct an http.Client or an oauth2.TokenSource. -// -// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats -// described in OAuth2 Configs, above. The TokenSource in the returned value is the -// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or -// JWTConfigFromJSON, but the Credentials may contain additional information -// that is useful in some circumstances. -package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 6eb2aa95f..000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://oauth2.googleapis.com/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey.
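-// A typical use, as a sketch (the file name and scope here are illustrative,
-// and error handling is elided):
-//
-//	b, _ := ioutil.ReadFile("client_credentials.json")
-//	conf, _ := ConfigFromJSON(b, "https://www.googleapis.com/auth/devstorage.read_only")
-//	url := conf.AuthCodeURL("state")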
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var f credentialsFile - if err := json.Unmarshal(jsonKey, &f); err != nil { - return nil, err - } - if f.Type != serviceAccountKey { - return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) - } - scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope), nil -} - -// JSON key file types. -const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" -) - -// credentialsFile is the unmarshalled representation of a credentials file. -type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` - - // User Credential fields - // (These typically come from gcloud auth.) - ClientSecret string `json:"client_secret"` - ClientID string `json:"client_id"` - RefreshToken string `json:"refresh_token"` -} - -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { - cfg := &jwt.Config{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - Scopes: scopes, - TokenURL: f.TokenURL, - } - if cfg.TokenURL == "" { - cfg.TokenURL = JWTTokenURL - } - return cfg -} - -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { - switch f.Type { - case serviceAccountKey: - cfg := f.jwtConfig(scopes) - return cfg.TokenSource(ctx), nil - case userCredentialsKey: - cfg := &oauth2.Config{ - ClientID: f.ClientID, - ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: f.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", f.Type) - } -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// If no scopes are specified, a set of default scopes are automatically granted. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. -func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) -} - -type computeSource struct { - account string - scopes []string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenURI := "instance/service-accounts/" + acct + "/token" - if len(cs.scopes) > 0 { - v := url.Values{} - v.Set("scopes", strings.Join(cs.scopes, ",")) - tokenURI = tokenURI + "?" + v.Encode() - } - tokenJSON, err := metadata.Get(tokenURI) - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b0fdb3a88..000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
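A usage sketch for the ComputeTokenSource defined above; it is only meaningful on a GCE instance, where the metadata server is reachable:

```go
package main

import (
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects the instance's "default" service account; extra
	// arguments would request specific scopes.
	ts := google.ComputeTokenSource("")
	tok, err := ts.Token()
	if err != nil {
		// Off GCE this fails with "not running on GCE".
		log.Fatal(err)
	}
	log.Println("access token expires at:", tok.Expiry)
}
```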
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index 456224bc7..000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/oauth2" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
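A sketch of the JWT access-token source implemented above, which skips the OAuth2 token endpoint entirely and presents a self-signed JWT as the access token (key path and audience are hypothetical):

```go
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	jsonKey, err := ioutil.ReadFile("service-account.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// The audience is service-specific; this URL is hypothetical.
	ts, err := google.JWTAccessTokenSourceFromJSON(jsonKey, "https://example.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token() // no token-endpoint round trip is made
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.TokenType) // "Bearer"
}
```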
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := parseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. 
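A sketch of borrowing gcloud's stored credentials via the SDKConfig machinery above (requires a prior `gcloud auth login`):

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// "" picks the account currently active in gcloud's properties.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authorized scopes:", conf.Scopes())
	// Auto-refreshing *http.Client backed by the SDK's refresh token.
	client := conf.Client(context.Background())
	_ = client
}
```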
-func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -func parseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": {}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. -var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index 743487188..000000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go deleted file mode 100644 index 03265e888..000000000 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index c0ab196cf..000000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. 
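ParseKey sits in an internal package and cannot be imported from outside x/oauth2; the standalone sketch below reproduces the PEM handling its doc comment describes, using a generated throwaway key rather than a real credential:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"log"
)

func main() {
	// Generate a throwaway key and round-trip it through the form
	// ParseKey accepts: an unencrypted PKCS#1 (or PKCS#8) container.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	// ParseKey's core: strip the PEM container if present, then try
	// PKCS#8 and fall back to PKCS#1.
	block, _ := pem.Decode(pemBytes)
	parsed, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("modulus bits:", parsed.N.BitLen())
}
```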
-func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index 355c38696..000000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context/ctxhttp" -) - -// Token represents the credentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - if len(b) == 0 || string(b) == "null" { - return nil - } - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - if i > math.MaxInt32 { - i = math.MaxInt32 - } - *e = expirationTime(i) - return nil -} - -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. 
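The deprecation note above points callers at Endpoint.AuthStyle; a sketch of what that looks like from the public package (provider URLs are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	// Fixing AuthStyle up front skips the header-then-params probing
	// that AuthStyleUnknown would otherwise trigger on the first
	// token request.
	endpoint := oauth2.Endpoint{
		AuthURL:   "https://provider.example.com/oauth/authorize",
		TokenURL:  "https://provider.example.com/oauth/token",
		AuthStyle: oauth2.AuthStyleInHeader,
	}
	fmt.Println(endpoint.AuthStyle) // 2
}
```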
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - -// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. -type AuthStyle int - -const ( - AuthStyleUnknown AuthStyle = 0 - AuthStyleInParams AuthStyle = 1 - AuthStyleInHeader AuthStyle = 2 -) - -// authStyleCache is the set of tokenURLs we've successfully used via -// RetrieveToken and which style auth we ended up using. -// It's called a cache, but it doesn't (yet?) shrink. It's expected that -// the set of OAuth2 servers a program contacts over time is fixed and -// small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil -} - -// lookupAuthStyle reports which auth style we last used with tokenURL -// when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] - return -} - -// setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) - } - authStyleCache.m[tokenURL] = v -} - -// newTokenRequest returns a new *http.Request to retrieve a new token -// from tokenURL using the provided clientID, clientSecret, and POST -// body parameters. -// -// inParams is whether the clientID & clientSecret should be encoded -// as the POST body. An 'inParams' value of true means to send it in -// the POST body (along with any values in v); false means to send it -// in the Authorization header. -func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { - if authStyle == AuthStyleInParams { - v = cloneURLValues(v) - if clientID != "" { - v.Set("client_id", clientID) - } - if clientSecret != "" { - v.Set("client_secret", clientSecret) - } - } - req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if authStyle == AuthStyleInHeader { - req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) - } - return req, nil -} - -func cloneURLValues(v url.Values) url.Values { - v2 := make(url.Values, len(v)) - for k, vv := range v { - v2[k] = append([]string(nil), vv...) - } - return v2 -} - -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 - if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { - authStyle = style - needsAuthStyleProbe = false - } else { - authStyle = AuthStyleInHeader // the first way we'll try - } - } - req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - if err != nil { - return nil, err - } - token, err := doTokenRoundTrip(ctx, req) - if err != nil && needsAuthStyleProbe { - // If we get an error, assume the server wants the - // clientID & clientSecret in a different form. - // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
- // In summary: - // - Reddit only accepts client secret in the Authorization header - // - Dropbox accepts either it in URL param or Auth header, but not both. - // - Google only accepts URL param (not spec compliant?), not Auth header - // - Stripe only accepts client secret in Auth header with Bearer method, not Basic - // - // We used to maintain a big table in this code of all the sites and which way - // they went, but maintaining it didn't scale & got annoying. - // So just try both ways. - authStyle = AuthStyleInParams // the second way we'll try - req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - token, err = doTokenRoundTrip(ctx, req) - } - if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token != nil && token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, err -} - -func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) - if err != nil { - return nil, err - } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - r.Body.Close() - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - if token.AccessToken == "" { - return nil, errors.New("oauth2: server response missing access_token") - } - return token, nil -} - -type RetrieveError struct { - Response *http.Response - Body []byte -} - -func (r *RetrieveError) Error() string { - return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index 572074a63..000000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "net/http" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. 
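The HTTPClient key above is what the public package re-exports as oauth2.HTTPClient; a sketch of overriding the token-fetching client through the context (the TLS tweak is a hypothetical motivation):

```go
package main

import (
	"context"
	"crypto/tls"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// A custom client placed on the context is picked up by
	// ContextClient for all token-endpoint traffic.
	custom := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)
	_ = ctx // pass ctx to Exchange, TokenSource, NewClient, etc.
}
```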
It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -var appengineClientHook func(context.Context) *http.Client - -func ContextClient(ctx context.Context) *http.Client { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc - } - } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } - return http.DefaultClient -} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 683d2d271..000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides a partial implementation -// of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. -// -// See RFC 7515. -// -// Deprecated: this package is not intended for public use and might be -// removed in the future. It exists for internal use only. -// Please switch to another JWS package or copy this package into your own -// source tree. -package jws // import "golang.org/x/oauth2/jws" - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64.RawURLEncoding.EncodeToString(b), nil - } - - // Marshal private claim set and then append it to b. 
- prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. - return nil, errors.New("jws: invalid token received") - } - decoded, err := base64.RawURLEncoding.DecodeString(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. -func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) -} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index 99f3e0a32..000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. - PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration - - // Audience optionally specifies the intended audience of the - // request. If empty, the value of TokenURL is used as the - // intended audience. - Audience string -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. 
- claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - if aud := js.conf.Audience; aud != "" { - claimSet.Aud = aud - } - h := *defaultHeader - h.KeyID = js.conf.PrivateKeyID - payload, err := jws.Encode(&h, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, &oauth2.RetrieveError{ - Response: resp, - Body: body, - } - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index 428283f0b..000000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests, -// as specified in RFC 6749. -// It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" - -import ( - "bytes" - "context" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -// -// Deprecated: Use context.Background() or context.TODO() instead. -var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). -type Config struct { - // ClientID is the application's ID. 
- ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint represents an OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string - - // AuthStyle optionally specifies how the endpoint wants the - // client ID & client secret sent. The zero value means to - // auto-detect. - AuthStyle AuthStyle -} - -// AuthStyle represents how requests for tokens are authenticated -// to the server. -type AuthStyle int - -const ( - // AuthStyleAutoDetect means to auto-detect which authentication - // style the provider wants by trying both ways and caching - // the successful way for the future. - AuthStyleAutoDetect AuthStyle = 0 - - // AuthStyleInParams sends the "client_id" and "client_secret" - // in the POST body as application/x-www-form-urlencoded parameters. - AuthStyleInParams AuthStyle = 1 - - // AuthStyleInHeader sends the client_id and client_password - // using HTTP Basic Authorization. This is an optional style - // described in the OAuth2 RFC 6749 section 2.3.1. - AuthStyleInHeader AuthStyle = 2 -) - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. 
You must -// always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - } - if c.RedirectURL != "" { - v.Set("redirect_uri", c.RedirectURL) - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. - v.Set("state", state) - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - v := url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - return retrieveToken(ctx, c, v) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -// -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. -func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { - v := url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - } - if c.RedirectURL != "" { - v.Set("redirect_uri", c.RedirectURL) - } - for _, opt := range opts { - opt.setValue(v) - } - return retrieveToken(ctx, c, v) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. 
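Putting AuthCodeURL and Exchange together, the canonical three-legged sequence looks roughly like this (client values, endpoint URLs, and the callback code are hypothetical; state verification is elided to a comment):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",     // hypothetical
		ClientSecret: "client-secret", // hypothetical
		RedirectURL:  "http://localhost:8080/callback",
		Scopes:       []string{"read"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}
	fmt.Println("visit:", conf.AuthCodeURL("random-state"))

	// On the callback: verify FormValue("state") first, then exchange.
	code := "code-from-callback" // hypothetical
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background(), tok) // refreshes as needed
	_ = client
}
```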
-func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. -type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// Note that if a custom *http.Client is provided via the Context it -// is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - return internal.ContextClient(ctx) - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. 
-// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 822720341..000000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the credentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. 
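SetAuthHeader, documented above, is the manual escape hatch for code that cannot route requests through Transport; a short sketch (token value is hypothetical):

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// TokenType is empty, so Type() falls back to "Bearer".
	tok := &oauth2.Token{AccessToken: "hypothetical-access-token"}
	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil)
	if err != nil {
		panic(err)
	}
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization")) // "Bearer hypothetical-access-token"
}
```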
-func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// timeNow is time.Now but pulled out as a variable for tests. -var timeNow = time.Now - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) - if err != nil { - if rErr, ok := err.(*internal.RetrieveError); ok { - return nil, (*RetrieveError)(rErr) - } - return nil, err - } - return tokenFromInternal(tk), nil -} - -// RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. -type RetrieveError struct { - Response *http.Response - // Body is the body that was consumed by reading Response.Body. - // It may be truncated. - Body []byte -} - -func (r *RetrieveError) Error() string { - return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index aa0d34f1e..000000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. 
-// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token from Transport's Source. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - reqBodyClosed := false - if req.Body != nil { - defer func() { - if !reqBodyClosed { - req.Body.Close() - } - }() - } - - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - - // req.Body is assumed to have been closed by the base RoundTripper. - reqBodyClosed = true - - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
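The Transport removed above is rarely constructed by hand, but doing so makes the mechanics visible; a sketch with a static, never-expiring token (token value and endpoint are hypothetical):

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	client := &http.Client{
		Transport: &oauth2.Transport{
			// StaticTokenSource is appropriate here only because the
			// token never needs refreshing.
			Source: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "hypothetical-token"}),
			// Base is nil, so http.DefaultTransport is used underneath.
		},
	}
	resp, err := client.Get("https://api.example.com/resource") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```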
diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 9857fe53d..000000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - errOnce sync.Once - err error -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/sys/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
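Typical use of the errgroup package removed above pairs Go with Wait; a minimal sketch, where the URLs are placeholders rather than anything from the original sources:

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	urls := []string{"https://example.com/a", "https://example.com/b"}
	for _, url := range urls {
		url := url // capture the loop variable for the goroutine
		g.Go(func() error {
			req, err := http.NewRequest("GET", url, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req.WithContext(ctx))
			if err != nil {
				return err // the first error cancels ctx for the others
			}
			return resp.Body.Close()
		})
	}
	// Wait returns the first non-nil error (if any) from the goroutines.
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}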
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/sys/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index da6b9e436..000000000 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "encoding/binary" - "runtime" -) - -// hostByteOrder returns binary.LittleEndian on little-endian machines and -// binary.BigEndian on big-endian machines. 
-func hostByteOrder() binary.ByteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "arm", "arm64", - "mipsle", "mips64le", "mips64p32le", - "ppc64le", - "riscv", "riscv64": - return binary.LittleEndian - case "armbe", "arm64be", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "sparc", "sparc64": - return binary.BigEndian - } - panic("unknown architecture") -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index 679e78c2c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating-system-specific file, e.g. /proc/self/auxv on linux/arm. -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further, HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. -var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. - HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false.
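Callers consume these flags as plain booleans after import; a minimal sketch of probing a few of the X86 features above (output varies by machine):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// On non-x86 platforms every X86 flag is simply false.
	fmt.Println("initialized:", cpu.Initialized)
	fmt.Println("AES-NI:", cpu.X86.HasAES)
	fmt.Println("AVX2:", cpu.X86.HasAVX2)
}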
-var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // JavaScript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Load-acquire RCpc (release consistent processor consistent) instruction support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. -// If the current platform is not ppc64/ppc64le then all feature flags are false. -// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). -// The struct is padded to avoid false sharing. -var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further, HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set.
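The CacheLinePad fields bracketing these structs exist to keep the flag words off cache lines shared with unrelated data; a minimal sketch of the same idea in user code (the sharded counter is illustrative, not from the package):

package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/sys/cpu"
)

// shard keeps each counter on its own cache line so that goroutines
// incrementing different shards do not contend for the same line.
type shard struct {
	n uint64
	_ cpu.CacheLinePad
}

func main() {
	var shards [4]shard
	atomic.AddUint64(&shards[0].n, 1)
	atomic.AddUint64(&shards[3].n, 1)
	fmt.Println(shards[0].n + shards[3].n) // 2
}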
-var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go deleted file mode 100644 index d8c26a048..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build aix,ppc64 - -package cpu - -import "golang.org/x/sys/unix" - -const cacheLineSize = 128 - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func init() { - impl := unix.Getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER9 = true - } - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 7f2348b7d..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index 568bcd031..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index f7cb46971..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build 386 amd64 amd64p32 -// +build !gccgo - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c deleted file mode 100644 index e363c7d13..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build gccgo - -#include <cpuid.h> -#include <stdint.h> - -// Need to wrap __get_cpuid_count because it's declared as static. -int -gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); -} - -// xgetbv reads the contents of an XCR (Extended Control Register) -// specified in the ECX register into registers EDX:EAX. -// Currently, the only supported value for XCR is 0. -// -// TODO: Replace with a better alternative: -// -// #include <immintrin.h> -// -// #pragma GCC target("xsave") -// -// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { -// unsigned long long x = _xgetbv(0); -// *eax = x & 0xffffffff; -// *edx = (x >> 32) & 0xffffffff; -// } -// -// Note that _xgetbv is defined starting with GCC 8. -void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go deleted file mode 100644 index ba49b91bd..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go deleted file mode 100644 index aa986f778..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return false } - -// TODO(mundaym): the following feature detection functions are currently -// stubs. See https://golang.org/cl/162887 for how to fix this. -// They are likely to be expensive to call so the results should be cached.
-func stfle() facilityList { panic("not implemented for gccgo") } -func kmQuery() queryResult { panic("not implemented for gccgo") } -func kmcQuery() queryResult { panic("not implemented for gccgo") } -func kmctrQuery() queryResult { panic("not implemented for gccgo") } -func kmaQuery() queryResult { panic("not implemented for gccgo") } -func kimdQuery() queryResult { panic("not implemented for gccgo") } -func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 76b5f507f..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !amd64,!amd64p32,!386 - -package cpu - -import ( - "io/ioutil" -) - -const ( - _AT_HWCAP = 16 - _AT_HWCAP2 = 26 - - procAuxv = "/proc/self/auxv" - - uintSize = int(32 << (^uint(0) >> 63)) -) - -// For those platforms that don't have a 'cpuid' equivalent, we use HWCAP/HWCAP2. -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - -func init() { - buf, err := ioutil.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false - return - } - - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - doinit() - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index fa7fb1bd7..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 64 - -// HWCAP/HWCAP2 bits. These are exposed by Linux.
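The init function above is the whole trick: HWCAP and HWCAP2 are simply (tag, value) word pairs in /proc/self/auxv. A standalone sketch of the same parse, hard-coded to 64-bit little-endian Linux for brevity instead of probing the byte order (the constant mirrors _AT_HWCAP above):

package main

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
)

const _AT_HWCAP = 16 // auxv tag, as in the deleted code above

func main() {
	buf, err := ioutil.ReadFile("/proc/self/auxv")
	if err != nil {
		fmt.Println("auxv not readable:", err) // e.g. on Android
		return
	}
	// Each auxv entry is a (tag, value) pair of machine words;
	// assume 64-bit little-endian here for the sake of the sketch.
	for len(buf) >= 16 {
		tag := binary.LittleEndian.Uint64(buf[0:])
		val := binary.LittleEndian.Uint64(buf[8:])
		buf = buf[16:]
		if tag == _AT_HWCAP {
			fmt.Printf("AT_HWCAP = %#x\n", val)
		}
	}
}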
-const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -func doinit() { - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 6c8d975d4..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux -// +build ppc64 ppc64le - -package cpu - -const cacheLineSize = 128 - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. -const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index d579eaef4..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const cacheLineSize = 256 - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -// facility is a bit index for the named facility. -type facility uint8 - -const ( - // cryptography facilities - msa4 facility = 77 // message-security-assist extension 4 - msa8 facility = 146 // message-security-assist extension 8 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) 
- } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index f55e0c82c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips64 mips64le - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index cda87b1a1..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips mipsle - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index dd1e76dc9..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux,arm64 - -package cpu - -const cacheLineSize = 64 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index e5037d92e..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
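The MSB-first bit numbering used by bitIsSet and the query/facility lists above is the part that is easy to get wrong; a tiny self-contained check against a synthetic facility list, using bits 77 and 146 (the msa4/msa8 facilities named above):

package main

import "fmt"

// bitIsSet mirrors the helper above: index 0 is the leftmost
// (most significant) bit of bits[0].
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	var bits [4]uint64
	bits[77/64] |= (1 << 63) >> (77 % 64) // set facility 77 (msa4)
	fmt.Println(bitIsSet(bits[:], 77))    // true
	fmt.Println(bitIsSet(bits[:], 146))   // false: msa8 absent
}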
- -// +build !gccgo - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index bd9bbda0c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index d70d317f5..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 - -package cpu - -const cacheLineSize = 64 - -func init() { - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - osSupportsAVX := false - // For XGETBV, OSXSAVE bit is required and sufficient. 
- if X86.HasOSXSAVE { - eax, _ := xgetbv() - // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) - } - - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX - - if maxID < 7 { - return - } - - _, ebx7, _, _ := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) -} - -func isSet(bitpos uint, value uint32) bool { - return value&(1<<bitpos) != 0 -}
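Because HasAVX2 above already folds in the xgetbv OS-support check, downstream code can dispatch on a single flag; a minimal sketch of that pattern, where both implementations are plain Go placeholders rather than anything from the original sources:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func addVecGeneric(dst, a, b []float32) {
	for i := range dst {
		dst[i] = a[i] + b[i]
	}
}

// addVec is chosen once at startup; a real port would install an
// assembly AVX2 routine when cpu.X86.HasAVX2 reports true.
var addVec = addVecGeneric

func init() {
	if cpu.X86.HasAVX2 {
		addVec = addVecGeneric // placeholder for an AVX2 implementation
	}
}

func main() {
	dst := make([]float32, 3)
	addVec(dst, []float32{1, 2, 3}, []float32{4, 5, 6})
	fmt.Println(dst) // [5 7 9]
}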