Use a vendor directory for repeatable builds - fixes #816
This is using godep to manage the vendor directory.
This commit is contained in:
parent
01be5bff02
commit
f7af730b50
637 changed files with 247384 additions and 11 deletions
407
Godeps/Godeps.json
generated
Normal file
407
Godeps/Godeps.json
generated
Normal file
|
@ -0,0 +1,407 @@
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/ncw/rclone",
|
||||||
|
"GoVersion": "go1.7",
|
||||||
|
"GodepVersion": "v74",
|
||||||
|
"Packages": [
|
||||||
|
"./..."
|
||||||
|
],
|
||||||
|
"Deps": [
|
||||||
|
{
|
||||||
|
"ImportPath": "bazil.org/fuse",
|
||||||
|
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "bazil.org/fuse/fs",
|
||||||
|
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "bazil.org/fuse/fuseutil",
|
||||||
|
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "cloud.google.com/go/compute/metadata",
|
||||||
|
"Comment": "v0.1.0-185-gfe3d41e",
|
||||||
|
"Rev": "fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "cloud.google.com/go/internal",
|
||||||
|
"Comment": "v0.1.0-185-gfe3d41e",
|
||||||
|
"Rev": "fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/Unknwon/goconfig",
|
||||||
|
"Rev": "5aa4f8cd5a472c2411c778b4680f59f2223f1966"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/VividCortex/ewma",
|
||||||
|
"Comment": "v1.0-20-gc595cd8",
|
||||||
|
"Rev": "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
|
||||||
|
"Comment": "v1.4.12-6-g388802d",
|
||||||
|
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
|
||||||
|
"Comment": "v1.0.6",
|
||||||
|
"Rev": "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||||
|
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/go-ini/ini",
|
||||||
|
"Comment": "v1.9.0",
|
||||||
|
"Rev": "193d1ecb466bf97aae8b454a5cfc192941c64809"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/golang/protobuf/proto",
|
||||||
|
"Rev": "98fa357170587e470c5f27d3c3ea0947b71eb455"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/go-querystring/query",
|
||||||
|
"Rev": "9235644dd9e52eeae6fa48efd539fdc351a0af53"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/inconshreveable/mousetrap",
|
||||||
|
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/jmespath/go-jmespath",
|
||||||
|
"Comment": "0.2.2-12-g0b12d6b",
|
||||||
|
"Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/ncw/go-acd",
|
||||||
|
"Rev": "5622f9ac57985c9fd6e1ad419983c20043e007ce"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/ncw/swift",
|
||||||
|
"Rev": "b964f2ca856aac39885e258ad25aec08d5f64ee6"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/pkg/errors",
|
||||||
|
"Comment": "v0.8.0-1-g839d9e9",
|
||||||
|
"Rev": "839d9e913e063e28dfd0e6c7b7512793e0a48be9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/pmezard/go-difflib/difflib",
|
||||||
|
"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/rfjakob/eme",
|
||||||
|
"Comment": "v1.0-10-g601d0e2",
|
||||||
|
"Rev": "601d0e278ceda9aa2085a61c9265f6e690ef5255"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/russross/blackfriday",
|
||||||
|
"Comment": "v1.4-38-g93622da",
|
||||||
|
"Rev": "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
|
||||||
|
"Rev": "10ef21a441db47d8b13ebcc5fd2310f636973c77"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/skratchdot/open-golang/open",
|
||||||
|
"Rev": "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/spf13/cobra",
|
||||||
|
"Rev": "ec2fe7859914a5106dcab4e7901633d959bfc2f4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/spf13/cobra/doc",
|
||||||
|
"Rev": "ec2fe7859914a5106dcab4e7901633d959bfc2f4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/spf13/pflag",
|
||||||
|
"Rev": "bf8481a6aebc13a8aab52e699ffe2e79771f5a3f"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stacktic/dropbox",
|
||||||
|
"Rev": "58f839b21094d5e0af7caf613599830589233d20"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/objx",
|
||||||
|
"Rev": "1a9d0bb9f541897e62256577b352fdbc1fb4fd94"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/testify",
|
||||||
|
"Comment": "v1.1.4-4-g976c720",
|
||||||
|
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/testify/assert",
|
||||||
|
"Comment": "v1.1.4-4-g976c720",
|
||||||
|
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/testify/http",
|
||||||
|
"Comment": "v1.1.4-4-g976c720",
|
||||||
|
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/testify/mock",
|
||||||
|
"Comment": "v1.1.4-4-g976c720",
|
||||||
|
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stretchr/testify/require",
|
||||||
|
"Comment": "v1.1.4-4-g976c720",
|
||||||
|
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/tsenart/tb",
|
||||||
|
"Rev": "19f4c3d79d2bd67d0911b2e310b999eeea4454c1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/nacl/secretbox",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/pbkdf2",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/poly1305",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/salsa20/salsa",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/scrypt",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/crypto/ssh/terminal",
|
||||||
|
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/net/context",
|
||||||
|
"Rev": "0f2be02c5ddfa7c7936000ad519b04c6c795eab3"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||||
|
"Rev": "0f2be02c5ddfa7c7936000ad519b04c6c795eab3"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/oauth2",
|
||||||
|
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/oauth2/google",
|
||||||
|
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/oauth2/internal",
|
||||||
|
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/oauth2/jws",
|
||||||
|
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/oauth2/jwt",
|
||||||
|
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/sys/unix",
|
||||||
|
"Rev": "9bb9f0998d48b31547d975974935ae9b48c7a03c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/text/transform",
|
||||||
|
"Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||||
|
"Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/api/drive/v2",
|
||||||
|
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/api/gensupport",
|
||||||
|
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/api/googleapi",
|
||||||
|
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
|
||||||
|
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/api/storage/v1",
|
||||||
|
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/app_identity",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/base",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/datastore",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/log",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/modules",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "google.golang.org/appengine/internal/remote_api",
|
||||||
|
"Rev": "c98f627282072b1230c8795abe98e2914c8a1de9"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
5
Godeps/Readme
generated
Normal file
5
Godeps/Readme
generated
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
This directory tree is generated automatically by godep.
|
||||||
|
|
||||||
|
Please do not edit.
|
||||||
|
|
||||||
|
See https://github.com/tools/godep for more information.
|
21
Makefile
21
Makefile
|
@ -3,11 +3,14 @@ TAG := $(shell echo `git describe --tags`-`git rev-parse --abbrev-ref HEAD` | se
|
||||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
|
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||||
GO_LATEST := $(findstring go1.7,$(GO_VERSION))
|
GO_LATEST := $(findstring go1.7,$(GO_VERSION))
|
||||||
BETA_URL := http://beta.rclone.org/$(TAG)/
|
BETA_URL := http://beta.rclone.org/$(TAG)/
|
||||||
|
# Only needed for Go 1.5
|
||||||
|
export GO15VENDOREXPERIMENT=1
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
go install -v ./...
|
go install -v
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@echo SHELL="'$(SHELL)'"
|
@echo SHELL="'$(SHELL)'"
|
||||||
|
@ -20,28 +23,27 @@ vars:
|
||||||
|
|
||||||
# Full suite of integration tests
|
# Full suite of integration tests
|
||||||
test: rclone
|
test: rclone
|
||||||
go test ./...
|
go test $(GO_FILES)
|
||||||
cd fs && go run test_all.go
|
cd fs && go run test_all.go
|
||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
go test ./...
|
go test $(GO_FILES)
|
||||||
go test -cpu=2 -race ./...
|
go test -cpu=2 -race $(GO_FILES)
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
ifdef GO_LATEST
|
ifdef GO_LATEST
|
||||||
go vet ./...
|
go vet $(GO_FILES)
|
||||||
errcheck ./...
|
errcheck $(GO_FILES)
|
||||||
goimports -d . | grep . ; test $$? -eq 1
|
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
|
||||||
golint ./... | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
|
go list ./... | grep -v /vendor/ | xargs -i golint {} | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
|
||||||
else
|
else
|
||||||
@echo Skipping tests as not on Go stable
|
@echo Skipping tests as not on Go stable
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# Get the build dependencies
|
# Get the build dependencies
|
||||||
build_dep:
|
build_dep:
|
||||||
go get -t ./...
|
|
||||||
ifdef GO_LATEST
|
ifdef GO_LATEST
|
||||||
go get -u github.com/kisielk/errcheck
|
go get -u github.com/kisielk/errcheck
|
||||||
go get -u golang.org/x/tools/cmd/goimports
|
go get -u golang.org/x/tools/cmd/goimports
|
||||||
|
@ -53,6 +55,7 @@ endif
|
||||||
# Update dependencies
|
# Update dependencies
|
||||||
update:
|
update:
|
||||||
go get -t -u -f -v ./...
|
go get -t -u -f -v ./...
|
||||||
|
godep save ./...
|
||||||
|
|
||||||
doc: rclone.1 MANUAL.html MANUAL.txt
|
doc: rclone.1 MANUAL.html MANUAL.txt
|
||||||
|
|
||||||
|
|
|
@ -12,9 +12,9 @@ install:
|
||||||
- echo %GOPATH%
|
- echo %GOPATH%
|
||||||
- go version
|
- go version
|
||||||
- go env
|
- go env
|
||||||
- go get -t -d ./...
|
- go install
|
||||||
|
|
||||||
build_script:
|
build_script:
|
||||||
- go vet ./...
|
- rmdir vendor\bazil.org\fuse /s /q
|
||||||
- go test -cpu=2 ./...
|
- go test -cpu=2 ./...
|
||||||
- go test -cpu=2 -short -race ./...
|
- go test -cpu=2 -short -race ./...
|
||||||
|
|
2
vendor/bazil.org/fuse/.gitattributes
generated
vendored
Normal file
2
vendor/bazil.org/fuse/.gitattributes
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
*.go filter=gofmt
|
||||||
|
*.cgo filter=gofmt
|
11
vendor/bazil.org/fuse/.gitignore
generated
vendored
Normal file
11
vendor/bazil.org/fuse/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
*~
|
||||||
|
.#*
|
||||||
|
## the next line needs to start with a backslash to avoid looking like
|
||||||
|
## a comment
|
||||||
|
\#*#
|
||||||
|
.*.swp
|
||||||
|
|
||||||
|
*.test
|
||||||
|
|
||||||
|
/clockfs
|
||||||
|
/hellofs
|
93
vendor/bazil.org/fuse/LICENSE
generated
vendored
Normal file
93
vendor/bazil.org/fuse/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
Copyright (c) 2013-2015 Tommi Virtanen.
|
||||||
|
Copyright (c) 2009, 2011, 2012 The Go Authors.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
The following included software components have additional copyright
|
||||||
|
notices and license terms that may differ from the above.
|
||||||
|
|
||||||
|
|
||||||
|
File fuse.go:
|
||||||
|
|
||||||
|
// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c,
|
||||||
|
// which carries this notice:
|
||||||
|
//
|
||||||
|
// The files in this directory are subject to the following license.
|
||||||
|
//
|
||||||
|
// The author of this software is Russ Cox.
|
||||||
|
//
|
||||||
|
// Copyright (c) 2006 Russ Cox
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose without fee is hereby granted, provided that this entire notice
|
||||||
|
// is included in all copies of any software which is or includes a copy
|
||||||
|
// or modification of this software and in all copies of the supporting
|
||||||
|
// documentation for such software.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
|
||||||
|
// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY
|
||||||
|
// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
|
||||||
|
// FITNESS FOR ANY PARTICULAR PURPOSE.
|
||||||
|
|
||||||
|
|
||||||
|
File fuse_kernel.go:
|
||||||
|
|
||||||
|
// Derived from FUSE's fuse_kernel.h
|
||||||
|
/*
|
||||||
|
This file defines the kernel interface of FUSE
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||||
|
|
||||||
|
|
||||||
|
This -- and only this -- header file may also be distributed under
|
||||||
|
the terms of the BSD Licence as follows:
|
||||||
|
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||||
|
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||||
|
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||||
|
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGE.
|
||||||
|
*/
|
23
vendor/bazil.org/fuse/README.md
generated
vendored
Normal file
23
vendor/bazil.org/fuse/README.md
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
bazil.org/fuse -- Filesystems in Go
|
||||||
|
===================================
|
||||||
|
|
||||||
|
`bazil.org/fuse` is a Go library for writing FUSE userspace
|
||||||
|
filesystems.
|
||||||
|
|
||||||
|
It is a from-scratch implementation of the kernel-userspace
|
||||||
|
communication protocol, and does not use the C library from the
|
||||||
|
project called FUSE. `bazil.org/fuse` embraces Go fully for safety and
|
||||||
|
ease of programming.
|
||||||
|
|
||||||
|
Here’s how to get going:
|
||||||
|
|
||||||
|
go get bazil.org/fuse
|
||||||
|
|
||||||
|
Website: http://bazil.org/fuse/
|
||||||
|
|
||||||
|
Github repository: https://github.com/bazil/fuse
|
||||||
|
|
||||||
|
API docs: http://godoc.org/bazil.org/fuse
|
||||||
|
|
||||||
|
Our thanks to Russ Cox for his fuse library, which this project is
|
||||||
|
based on.
|
35
vendor/bazil.org/fuse/buffer.go
generated
vendored
Normal file
35
vendor/bazil.org/fuse/buffer.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import "unsafe"
|
||||||
|
|
||||||
|
// buffer provides a mechanism for constructing a message from
|
||||||
|
// multiple segments.
|
||||||
|
type buffer []byte
|
||||||
|
|
||||||
|
// alloc allocates size bytes and returns a pointer to the new
|
||||||
|
// segment.
|
||||||
|
func (w *buffer) alloc(size uintptr) unsafe.Pointer {
|
||||||
|
s := int(size)
|
||||||
|
if len(*w)+s > cap(*w) {
|
||||||
|
old := *w
|
||||||
|
*w = make([]byte, len(*w), 2*cap(*w)+s)
|
||||||
|
copy(*w, old)
|
||||||
|
}
|
||||||
|
l := len(*w)
|
||||||
|
*w = (*w)[:l+s]
|
||||||
|
return unsafe.Pointer(&(*w)[l])
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset clears out the contents of the buffer.
|
||||||
|
func (w *buffer) reset() {
|
||||||
|
for i := range (*w)[:cap(*w)] {
|
||||||
|
(*w)[i] = 0
|
||||||
|
}
|
||||||
|
*w = (*w)[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBuffer(extra uintptr) buffer {
|
||||||
|
const hdrSize = unsafe.Sizeof(outHeader{})
|
||||||
|
buf := make(buffer, hdrSize, hdrSize+extra)
|
||||||
|
return buf
|
||||||
|
}
|
21
vendor/bazil.org/fuse/debug.go
generated
vendored
Normal file
21
vendor/bazil.org/fuse/debug.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
func stack() string {
|
||||||
|
buf := make([]byte, 1024)
|
||||||
|
return string(buf[:runtime.Stack(buf, false)])
|
||||||
|
}
|
||||||
|
|
||||||
|
func nop(msg interface{}) {}
|
||||||
|
|
||||||
|
// Debug is called to output debug messages, including protocol
|
||||||
|
// traces. The default behavior is to do nothing.
|
||||||
|
//
|
||||||
|
// The messages have human-friendly string representations and are
|
||||||
|
// safe to marshal to JSON.
|
||||||
|
//
|
||||||
|
// Implementations must not retain msg.
|
||||||
|
var Debug func(msg interface{}) = nop
|
17
vendor/bazil.org/fuse/error_darwin.go
generated
vendored
Normal file
17
vendor/bazil.org/fuse/error_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ENOATTR = Errno(syscall.ENOATTR)
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
errNoXattr = ENOATTR
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
errnoNames[errNoXattr] = "ENOATTR"
|
||||||
|
}
|
15
vendor/bazil.org/fuse/error_freebsd.go
generated
vendored
Normal file
15
vendor/bazil.org/fuse/error_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const (
|
||||||
|
ENOATTR = Errno(syscall.ENOATTR)
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
errNoXattr = ENOATTR
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
errnoNames[errNoXattr] = "ENOATTR"
|
||||||
|
}
|
17
vendor/bazil.org/fuse/error_linux.go
generated
vendored
Normal file
17
vendor/bazil.org/fuse/error_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ENODATA = Errno(syscall.ENODATA)
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
errNoXattr = ENODATA
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
errnoNames[errNoXattr] = "ENODATA"
|
||||||
|
}
|
31
vendor/bazil.org/fuse/error_std.go
generated
vendored
Normal file
31
vendor/bazil.org/fuse/error_std.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// There is very little commonality in extended attribute errors
|
||||||
|
// across platforms.
|
||||||
|
//
|
||||||
|
// getxattr return value for "extended attribute does not exist" is
|
||||||
|
// ENOATTR on OS X, and ENODATA on Linux and apparently at least
|
||||||
|
// NetBSD. There may be a #define ENOATTR on Linux too, but the value
|
||||||
|
// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
|
||||||
|
// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
|
||||||
|
// ENODATA exists but is only used for STREAMs.
|
||||||
|
//
|
||||||
|
// Each platform will define it a errNoXattr constant, and this file
|
||||||
|
// will enforce that it implements the right interfaces and hide the
|
||||||
|
// implementation.
|
||||||
|
//
|
||||||
|
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html
|
||||||
|
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html
|
||||||
|
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html
|
||||||
|
// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
|
||||||
|
// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2
|
||||||
|
// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html
|
||||||
|
|
||||||
|
// ErrNoXattr is a platform-independent error value meaning the
|
||||||
|
// extended attribute was not found. It can be used to respond to
|
||||||
|
// GetxattrRequest and such.
|
||||||
|
const ErrNoXattr = errNoXattr
|
||||||
|
|
||||||
|
var _ error = ErrNoXattr
|
||||||
|
var _ Errno = ErrNoXattr
|
||||||
|
var _ ErrorNumber = ErrNoXattr
|
1568
vendor/bazil.org/fuse/fs/serve.go
generated
vendored
Normal file
1568
vendor/bazil.org/fuse/fs/serve.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
99
vendor/bazil.org/fuse/fs/tree.go
generated
vendored
Normal file
99
vendor/bazil.org/fuse/fs/tree.go
generated
vendored
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
// FUSE directory tree, for servers that wish to use it with the service loop.
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
pathpkg "path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bazil.org/fuse"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Tree implements a basic read-only directory tree for FUSE.
|
||||||
|
// The Nodes contained in it may still be writable.
|
||||||
|
type Tree struct {
|
||||||
|
tree
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Tree) Root() (Node, error) {
|
||||||
|
return &t.tree, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds the path to the tree, resolving to the given node.
|
||||||
|
// If path or a prefix of path has already been added to the tree,
|
||||||
|
// Add panics.
|
||||||
|
//
|
||||||
|
// Add is only safe to call before starting to serve requests.
|
||||||
|
func (t *Tree) Add(path string, node Node) {
|
||||||
|
path = pathpkg.Clean("/" + path)[1:]
|
||||||
|
elems := strings.Split(path, "/")
|
||||||
|
dir := Node(&t.tree)
|
||||||
|
for i, elem := range elems {
|
||||||
|
dt, ok := dir.(*tree)
|
||||||
|
if !ok {
|
||||||
|
panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path)
|
||||||
|
}
|
||||||
|
n := dt.lookup(elem)
|
||||||
|
if n != nil {
|
||||||
|
if i+1 == len(elems) {
|
||||||
|
panic("fuse: Tree.Add for " + path + " conflicts with " + elem)
|
||||||
|
}
|
||||||
|
dir = n
|
||||||
|
} else {
|
||||||
|
if i+1 == len(elems) {
|
||||||
|
dt.add(elem, node)
|
||||||
|
} else {
|
||||||
|
dir = &tree{}
|
||||||
|
dt.add(elem, dir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type treeDir struct {
|
||||||
|
name string
|
||||||
|
node Node
|
||||||
|
}
|
||||||
|
|
||||||
|
type tree struct {
|
||||||
|
dir []treeDir
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tree) lookup(name string) Node {
|
||||||
|
for _, d := range t.dir {
|
||||||
|
if d.name == name {
|
||||||
|
return d.node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tree) add(name string, n Node) {
|
||||||
|
t.dir = append(t.dir, treeDir{name, n})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error {
|
||||||
|
a.Mode = os.ModeDir | 0555
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tree) Lookup(ctx context.Context, name string) (Node, error) {
|
||||||
|
n := t.lookup(name)
|
||||||
|
if n != nil {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
return nil, fuse.ENOENT
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
|
||||||
|
var out []fuse.Dirent
|
||||||
|
for _, d := range t.dir {
|
||||||
|
out = append(out, fuse.Dirent{Name: d.name})
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
2303
vendor/bazil.org/fuse/fuse.go
generated
vendored
Normal file
2303
vendor/bazil.org/fuse/fuse.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
9
vendor/bazil.org/fuse/fuse_darwin.go
generated
vendored
Normal file
9
vendor/bazil.org/fuse/fuse_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// Maximum file write size we are prepared to receive from the kernel.
|
||||||
|
//
|
||||||
|
// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will
|
||||||
|
// forcibly close the /dev/fuse file descriptor on a Setxattr with a
|
||||||
|
// 16MB value. See TestSetxattr16MB and
|
||||||
|
// https://github.com/bazil/fuse/issues/42
|
||||||
|
const maxWrite = 16 * 1024 * 1024
|
6
vendor/bazil.org/fuse/fuse_freebsd.go
generated
vendored
Normal file
6
vendor/bazil.org/fuse/fuse_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// Maximum file write size we are prepared to receive from the kernel.
|
||||||
|
//
|
||||||
|
// This number is just a guess.
|
||||||
|
const maxWrite = 128 * 1024
|
774
vendor/bazil.org/fuse/fuse_kernel.go
generated
vendored
Normal file
774
vendor/bazil.org/fuse/fuse_kernel.go
generated
vendored
Normal file
|
@ -0,0 +1,774 @@
|
||||||
|
// See the file LICENSE for copyright and licensing information.
|
||||||
|
|
||||||
|
// Derived from FUSE's fuse_kernel.h, which carries this notice:
|
||||||
|
/*
|
||||||
|
This file defines the kernel interface of FUSE
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||||
|
|
||||||
|
|
||||||
|
This -- and only this -- header file may also be distributed under
|
||||||
|
the terms of the BSD Licence as follows:
|
||||||
|
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||||
|
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||||
|
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||||
|
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The FUSE version implemented by the package.
|
||||||
|
const (
|
||||||
|
protoVersionMinMajor = 7
|
||||||
|
protoVersionMinMinor = 8
|
||||||
|
protoVersionMaxMajor = 7
|
||||||
|
protoVersionMaxMinor = 12
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rootID = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
type kstatfs struct {
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail uint64
|
||||||
|
Files uint64
|
||||||
|
Ffree uint64
|
||||||
|
Bsize uint32
|
||||||
|
Namelen uint32
|
||||||
|
Frsize uint32
|
||||||
|
_ uint32
|
||||||
|
Spare [6]uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileLock struct {
|
||||||
|
Start uint64
|
||||||
|
End uint64
|
||||||
|
Type uint32
|
||||||
|
Pid uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetattrFlags are bit flags that can be seen in GetattrRequest.
|
||||||
|
type GetattrFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Indicates the handle is valid.
|
||||||
|
GetattrFh GetattrFlags = 1 << 0
|
||||||
|
)
|
||||||
|
|
||||||
|
var getattrFlagsNames = []flagName{
|
||||||
|
{uint32(GetattrFh), "GetattrFh"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl GetattrFlags) String() string {
|
||||||
|
return flagString(uint32(fl), getattrFlagsNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The SetattrValid are bit flags describing which fields in the SetattrRequest
|
||||||
|
// are included in the change.
|
||||||
|
type SetattrValid uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
SetattrMode SetattrValid = 1 << 0
|
||||||
|
SetattrUid SetattrValid = 1 << 1
|
||||||
|
SetattrGid SetattrValid = 1 << 2
|
||||||
|
SetattrSize SetattrValid = 1 << 3
|
||||||
|
SetattrAtime SetattrValid = 1 << 4
|
||||||
|
SetattrMtime SetattrValid = 1 << 5
|
||||||
|
SetattrHandle SetattrValid = 1 << 6
|
||||||
|
|
||||||
|
// Linux only(?)
|
||||||
|
SetattrAtimeNow SetattrValid = 1 << 7
|
||||||
|
SetattrMtimeNow SetattrValid = 1 << 8
|
||||||
|
SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
SetattrCrtime SetattrValid = 1 << 28
|
||||||
|
SetattrChgtime SetattrValid = 1 << 29
|
||||||
|
SetattrBkuptime SetattrValid = 1 << 30
|
||||||
|
SetattrFlags SetattrValid = 1 << 31
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 }
|
||||||
|
func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 }
|
||||||
|
func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 }
|
||||||
|
func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 }
|
||||||
|
func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 }
|
||||||
|
func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 }
|
||||||
|
func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 }
|
||||||
|
func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 }
|
||||||
|
func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 }
|
||||||
|
func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 }
|
||||||
|
func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 }
|
||||||
|
func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 }
|
||||||
|
func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 }
|
||||||
|
func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 }
|
||||||
|
|
||||||
|
func (fl SetattrValid) String() string {
|
||||||
|
return flagString(uint32(fl), setattrValidNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var setattrValidNames = []flagName{
|
||||||
|
{uint32(SetattrMode), "SetattrMode"},
|
||||||
|
{uint32(SetattrUid), "SetattrUid"},
|
||||||
|
{uint32(SetattrGid), "SetattrGid"},
|
||||||
|
{uint32(SetattrSize), "SetattrSize"},
|
||||||
|
{uint32(SetattrAtime), "SetattrAtime"},
|
||||||
|
{uint32(SetattrMtime), "SetattrMtime"},
|
||||||
|
{uint32(SetattrHandle), "SetattrHandle"},
|
||||||
|
{uint32(SetattrAtimeNow), "SetattrAtimeNow"},
|
||||||
|
{uint32(SetattrMtimeNow), "SetattrMtimeNow"},
|
||||||
|
{uint32(SetattrLockOwner), "SetattrLockOwner"},
|
||||||
|
{uint32(SetattrCrtime), "SetattrCrtime"},
|
||||||
|
{uint32(SetattrChgtime), "SetattrChgtime"},
|
||||||
|
{uint32(SetattrBkuptime), "SetattrBkuptime"},
|
||||||
|
{uint32(SetattrFlags), "SetattrFlags"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flags that can be seen in OpenRequest.Flags.
|
||||||
|
const (
|
||||||
|
// Access modes. These are not 1-bit flags, but alternatives where
|
||||||
|
// only one can be chosen. See the IsReadOnly etc convenience
|
||||||
|
// methods.
|
||||||
|
OpenReadOnly OpenFlags = syscall.O_RDONLY
|
||||||
|
OpenWriteOnly OpenFlags = syscall.O_WRONLY
|
||||||
|
OpenReadWrite OpenFlags = syscall.O_RDWR
|
||||||
|
|
||||||
|
// File was opened in append-only mode, all writes will go to end
|
||||||
|
// of file. OS X does not provide this information.
|
||||||
|
OpenAppend OpenFlags = syscall.O_APPEND
|
||||||
|
OpenCreate OpenFlags = syscall.O_CREAT
|
||||||
|
OpenDirectory OpenFlags = syscall.O_DIRECTORY
|
||||||
|
OpenExclusive OpenFlags = syscall.O_EXCL
|
||||||
|
OpenNonblock OpenFlags = syscall.O_NONBLOCK
|
||||||
|
OpenSync OpenFlags = syscall.O_SYNC
|
||||||
|
OpenTruncate OpenFlags = syscall.O_TRUNC
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenAccessModeMask is a bitmask that separates the access mode
|
||||||
|
// from the other flags in OpenFlags.
|
||||||
|
const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE
|
||||||
|
|
||||||
|
// OpenFlags are the O_FOO flags passed to open/create/etc calls. For
|
||||||
|
// example, os.O_WRONLY | os.O_APPEND.
|
||||||
|
type OpenFlags uint32
|
||||||
|
|
||||||
|
func (fl OpenFlags) String() string {
|
||||||
|
// O_RDONLY, O_RWONLY, O_RDWR are not flags
|
||||||
|
s := accModeName(fl & OpenAccessModeMask)
|
||||||
|
flags := uint32(fl &^ OpenAccessModeMask)
|
||||||
|
if flags != 0 {
|
||||||
|
s = s + "+" + flagString(flags, openFlagNames)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenReadOnly is set.
|
||||||
|
func (fl OpenFlags) IsReadOnly() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenReadOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenWriteOnly is set.
|
||||||
|
func (fl OpenFlags) IsWriteOnly() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenWriteOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenReadWrite is set.
|
||||||
|
func (fl OpenFlags) IsReadWrite() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenReadWrite
|
||||||
|
}
|
||||||
|
|
||||||
|
func accModeName(flags OpenFlags) string {
|
||||||
|
switch flags {
|
||||||
|
case OpenReadOnly:
|
||||||
|
return "OpenReadOnly"
|
||||||
|
case OpenWriteOnly:
|
||||||
|
return "OpenWriteOnly"
|
||||||
|
case OpenReadWrite:
|
||||||
|
return "OpenReadWrite"
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var openFlagNames = []flagName{
|
||||||
|
{uint32(OpenAppend), "OpenAppend"},
|
||||||
|
{uint32(OpenCreate), "OpenCreate"},
|
||||||
|
{uint32(OpenDirectory), "OpenDirectory"},
|
||||||
|
{uint32(OpenExclusive), "OpenExclusive"},
|
||||||
|
{uint32(OpenNonblock), "OpenNonblock"},
|
||||||
|
{uint32(OpenSync), "OpenSync"},
|
||||||
|
{uint32(OpenTruncate), "OpenTruncate"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The OpenResponseFlags are returned in the OpenResponse.
|
||||||
|
type OpenResponseFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file
|
||||||
|
OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open
|
||||||
|
OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X)
|
||||||
|
|
||||||
|
OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X
|
||||||
|
OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl OpenResponseFlags) String() string {
|
||||||
|
return flagString(uint32(fl), openResponseFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var openResponseFlagNames = []flagName{
|
||||||
|
{uint32(OpenDirectIO), "OpenDirectIO"},
|
||||||
|
{uint32(OpenKeepCache), "OpenKeepCache"},
|
||||||
|
{uint32(OpenNonSeekable), "OpenNonSeekable"},
|
||||||
|
{uint32(OpenPurgeAttr), "OpenPurgeAttr"},
|
||||||
|
{uint32(OpenPurgeUBC), "OpenPurgeUBC"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The InitFlags are used in the Init exchange.
|
||||||
|
type InitFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
InitAsyncRead InitFlags = 1 << 0
|
||||||
|
InitPosixLocks InitFlags = 1 << 1
|
||||||
|
InitFileOps InitFlags = 1 << 2
|
||||||
|
InitAtomicTrunc InitFlags = 1 << 3
|
||||||
|
InitExportSupport InitFlags = 1 << 4
|
||||||
|
InitBigWrites InitFlags = 1 << 5
|
||||||
|
// Do not mask file access modes with umask. Not supported on OS X.
|
||||||
|
InitDontMask InitFlags = 1 << 6
|
||||||
|
InitSpliceWrite InitFlags = 1 << 7
|
||||||
|
InitSpliceMove InitFlags = 1 << 8
|
||||||
|
InitSpliceRead InitFlags = 1 << 9
|
||||||
|
InitFlockLocks InitFlags = 1 << 10
|
||||||
|
InitHasIoctlDir InitFlags = 1 << 11
|
||||||
|
InitAutoInvalData InitFlags = 1 << 12
|
||||||
|
InitDoReaddirplus InitFlags = 1 << 13
|
||||||
|
InitReaddirplusAuto InitFlags = 1 << 14
|
||||||
|
InitAsyncDIO InitFlags = 1 << 15
|
||||||
|
InitWritebackCache InitFlags = 1 << 16
|
||||||
|
InitNoOpenSupport InitFlags = 1 << 17
|
||||||
|
|
||||||
|
InitCaseSensitive InitFlags = 1 << 29 // OS X only
|
||||||
|
InitVolRename InitFlags = 1 << 30 // OS X only
|
||||||
|
InitXtimes InitFlags = 1 << 31 // OS X only
|
||||||
|
)
|
||||||
|
|
||||||
|
type flagName struct {
|
||||||
|
bit uint32
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
var initFlagNames = []flagName{
|
||||||
|
{uint32(InitAsyncRead), "InitAsyncRead"},
|
||||||
|
{uint32(InitPosixLocks), "InitPosixLocks"},
|
||||||
|
{uint32(InitFileOps), "InitFileOps"},
|
||||||
|
{uint32(InitAtomicTrunc), "InitAtomicTrunc"},
|
||||||
|
{uint32(InitExportSupport), "InitExportSupport"},
|
||||||
|
{uint32(InitBigWrites), "InitBigWrites"},
|
||||||
|
{uint32(InitDontMask), "InitDontMask"},
|
||||||
|
{uint32(InitSpliceWrite), "InitSpliceWrite"},
|
||||||
|
{uint32(InitSpliceMove), "InitSpliceMove"},
|
||||||
|
{uint32(InitSpliceRead), "InitSpliceRead"},
|
||||||
|
{uint32(InitFlockLocks), "InitFlockLocks"},
|
||||||
|
{uint32(InitHasIoctlDir), "InitHasIoctlDir"},
|
||||||
|
{uint32(InitAutoInvalData), "InitAutoInvalData"},
|
||||||
|
{uint32(InitDoReaddirplus), "InitDoReaddirplus"},
|
||||||
|
{uint32(InitReaddirplusAuto), "InitReaddirplusAuto"},
|
||||||
|
{uint32(InitAsyncDIO), "InitAsyncDIO"},
|
||||||
|
{uint32(InitWritebackCache), "InitWritebackCache"},
|
||||||
|
{uint32(InitNoOpenSupport), "InitNoOpenSupport"},
|
||||||
|
|
||||||
|
{uint32(InitCaseSensitive), "InitCaseSensitive"},
|
||||||
|
{uint32(InitVolRename), "InitVolRename"},
|
||||||
|
{uint32(InitXtimes), "InitXtimes"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl InitFlags) String() string {
|
||||||
|
return flagString(uint32(fl), initFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
func flagString(f uint32, names []flagName) string {
|
||||||
|
var s string
|
||||||
|
|
||||||
|
if f == 0 {
|
||||||
|
return "0"
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range names {
|
||||||
|
if f&n.bit != 0 {
|
||||||
|
s += "+" + n.name
|
||||||
|
f &^= n.bit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f != 0 {
|
||||||
|
s += fmt.Sprintf("%+#x", f)
|
||||||
|
}
|
||||||
|
return s[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ReleaseFlags are used in the Release exchange.
|
||||||
|
type ReleaseFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
ReleaseFlush ReleaseFlags = 1 << 0
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl ReleaseFlags) String() string {
|
||||||
|
return flagString(uint32(fl), releaseFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var releaseFlagNames = []flagName{
|
||||||
|
{uint32(ReleaseFlush), "ReleaseFlush"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Opcodes
|
||||||
|
const (
|
||||||
|
opLookup = 1
|
||||||
|
opForget = 2 // no reply
|
||||||
|
opGetattr = 3
|
||||||
|
opSetattr = 4
|
||||||
|
opReadlink = 5
|
||||||
|
opSymlink = 6
|
||||||
|
opMknod = 8
|
||||||
|
opMkdir = 9
|
||||||
|
opUnlink = 10
|
||||||
|
opRmdir = 11
|
||||||
|
opRename = 12
|
||||||
|
opLink = 13
|
||||||
|
opOpen = 14
|
||||||
|
opRead = 15
|
||||||
|
opWrite = 16
|
||||||
|
opStatfs = 17
|
||||||
|
opRelease = 18
|
||||||
|
opFsync = 20
|
||||||
|
opSetxattr = 21
|
||||||
|
opGetxattr = 22
|
||||||
|
opListxattr = 23
|
||||||
|
opRemovexattr = 24
|
||||||
|
opFlush = 25
|
||||||
|
opInit = 26
|
||||||
|
opOpendir = 27
|
||||||
|
opReaddir = 28
|
||||||
|
opReleasedir = 29
|
||||||
|
opFsyncdir = 30
|
||||||
|
opGetlk = 31
|
||||||
|
opSetlk = 32
|
||||||
|
opSetlkw = 33
|
||||||
|
opAccess = 34
|
||||||
|
opCreate = 35
|
||||||
|
opInterrupt = 36
|
||||||
|
opBmap = 37
|
||||||
|
opDestroy = 38
|
||||||
|
opIoctl = 39 // Linux?
|
||||||
|
opPoll = 40 // Linux?
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
opSetvolname = 61
|
||||||
|
opGetxtimes = 62
|
||||||
|
opExchange = 63
|
||||||
|
)
|
||||||
|
|
||||||
|
type entryOut struct {
|
||||||
|
Nodeid uint64 // Inode ID
|
||||||
|
Generation uint64 // Inode generation
|
||||||
|
EntryValid uint64 // Cache timeout for the name
|
||||||
|
AttrValid uint64 // Cache timeout for the attributes
|
||||||
|
EntryValidNsec uint32
|
||||||
|
AttrValidNsec uint32
|
||||||
|
Attr attr
|
||||||
|
}
|
||||||
|
|
||||||
|
func entryOutSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(entryOut{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type forgetIn struct {
|
||||||
|
Nlookup uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type getattrIn struct {
|
||||||
|
GetattrFlags uint32
|
||||||
|
_ uint32
|
||||||
|
Fh uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type attrOut struct {
|
||||||
|
AttrValid uint64 // Cache timeout for the attributes
|
||||||
|
AttrValidNsec uint32
|
||||||
|
_ uint32
|
||||||
|
Attr attr
|
||||||
|
}
|
||||||
|
|
||||||
|
func attrOutSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(attrOut{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
type getxtimesOut struct {
|
||||||
|
Bkuptime uint64
|
||||||
|
Crtime uint64
|
||||||
|
BkuptimeNsec uint32
|
||||||
|
CrtimeNsec uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type mknodIn struct {
|
||||||
|
Mode uint32
|
||||||
|
Rdev uint32
|
||||||
|
Umask uint32
|
||||||
|
_ uint32
|
||||||
|
// "filename\x00" follows.
|
||||||
|
}
|
||||||
|
|
||||||
|
func mknodInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(mknodIn{}.Umask)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(mknodIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type mkdirIn struct {
|
||||||
|
Mode uint32
|
||||||
|
Umask uint32
|
||||||
|
// filename follows
|
||||||
|
}
|
||||||
|
|
||||||
|
func mkdirInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(mkdirIn{}.Umask) + 4
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(mkdirIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type renameIn struct {
|
||||||
|
Newdir uint64
|
||||||
|
// "oldname\x00newname\x00" follows
|
||||||
|
}
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
type exchangeIn struct {
|
||||||
|
Olddir uint64
|
||||||
|
Newdir uint64
|
||||||
|
Options uint64
|
||||||
|
// "oldname\x00newname\x00" follows
|
||||||
|
}
|
||||||
|
|
||||||
|
type linkIn struct {
|
||||||
|
Oldnodeid uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrInCommon struct {
|
||||||
|
Valid uint32
|
||||||
|
_ uint32
|
||||||
|
Fh uint64
|
||||||
|
Size uint64
|
||||||
|
LockOwner uint64 // unused on OS X?
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Unused2 uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
Unused3 uint32
|
||||||
|
Mode uint32
|
||||||
|
Unused4 uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Unused5 uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type openIn struct {
|
||||||
|
Flags uint32
|
||||||
|
Unused uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type openOut struct {
|
||||||
|
Fh uint64
|
||||||
|
OpenFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type createIn struct {
|
||||||
|
Flags uint32
|
||||||
|
Mode uint32
|
||||||
|
Umask uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func createInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(createIn{}.Umask)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(createIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type releaseIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Flags uint32
|
||||||
|
ReleaseFlags uint32
|
||||||
|
LockOwner uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type flushIn struct {
|
||||||
|
Fh uint64
|
||||||
|
FlushFlags uint32
|
||||||
|
_ uint32
|
||||||
|
LockOwner uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type readIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Offset uint64
|
||||||
|
Size uint32
|
||||||
|
ReadFlags uint32
|
||||||
|
LockOwner uint64
|
||||||
|
Flags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func readInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(readIn{}.ReadFlags) + 4
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(readIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ReadFlags are passed in ReadRequest.
|
||||||
|
type ReadFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// LockOwner field is valid.
|
||||||
|
ReadLockOwner ReadFlags = 1 << 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var readFlagNames = []flagName{
|
||||||
|
{uint32(ReadLockOwner), "ReadLockOwner"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl ReadFlags) String() string {
|
||||||
|
return flagString(uint32(fl), readFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Offset uint64
|
||||||
|
Size uint32
|
||||||
|
WriteFlags uint32
|
||||||
|
LockOwner uint64
|
||||||
|
Flags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(writeIn{}.LockOwner)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(writeIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeOut struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// The WriteFlags are passed in WriteRequest.
|
||||||
|
type WriteFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
WriteCache WriteFlags = 1 << 0
|
||||||
|
// LockOwner field is valid.
|
||||||
|
WriteLockOwner WriteFlags = 1 << 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var writeFlagNames = []flagName{
|
||||||
|
{uint32(WriteCache), "WriteCache"},
|
||||||
|
{uint32(WriteLockOwner), "WriteLockOwner"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl WriteFlags) String() string {
|
||||||
|
return flagString(uint32(fl), writeFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
const compatStatfsSize = 48
|
||||||
|
|
||||||
|
type statfsOut struct {
|
||||||
|
St kstatfs
|
||||||
|
}
|
||||||
|
|
||||||
|
type fsyncIn struct {
|
||||||
|
Fh uint64
|
||||||
|
FsyncFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrInCommon struct {
|
||||||
|
Size uint32
|
||||||
|
Flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (setxattrInCommon) position() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrInCommon struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (getxattrInCommon) position() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrOut struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type lkIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Owner uint64
|
||||||
|
Lk fileLock
|
||||||
|
LkFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func lkInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(lkIn{}.LkFlags)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(lkIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type lkOut struct {
|
||||||
|
Lk fileLock
|
||||||
|
}
|
||||||
|
|
||||||
|
type accessIn struct {
|
||||||
|
Mask uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type initIn struct {
|
||||||
|
Major uint32
|
||||||
|
Minor uint32
|
||||||
|
MaxReadahead uint32
|
||||||
|
Flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
const initInSize = int(unsafe.Sizeof(initIn{}))
|
||||||
|
|
||||||
|
type initOut struct {
|
||||||
|
Major uint32
|
||||||
|
Minor uint32
|
||||||
|
MaxReadahead uint32
|
||||||
|
Flags uint32
|
||||||
|
Unused uint32
|
||||||
|
MaxWrite uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type interruptIn struct {
|
||||||
|
Unique uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type bmapIn struct {
|
||||||
|
Block uint64
|
||||||
|
BlockSize uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type bmapOut struct {
|
||||||
|
Block uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type inHeader struct {
|
||||||
|
Len uint32
|
||||||
|
Opcode uint32
|
||||||
|
Unique uint64
|
||||||
|
Nodeid uint64
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Pid uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
const inHeaderSize = int(unsafe.Sizeof(inHeader{}))
|
||||||
|
|
||||||
|
type outHeader struct {
|
||||||
|
Len uint32
|
||||||
|
Error int32
|
||||||
|
Unique uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type dirent struct {
|
||||||
|
Ino uint64
|
||||||
|
Off uint64
|
||||||
|
Namelen uint32
|
||||||
|
Type uint32
|
||||||
|
Name [0]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
const direntSize = 8 + 8 + 4 + 4
|
||||||
|
|
||||||
|
const (
|
||||||
|
notifyCodePoll int32 = 1
|
||||||
|
notifyCodeInvalInode int32 = 2
|
||||||
|
notifyCodeInvalEntry int32 = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
type notifyInvalInodeOut struct {
|
||||||
|
Ino uint64
|
||||||
|
Off int64
|
||||||
|
Len int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type notifyInvalEntryOut struct {
|
||||||
|
Parent uint64
|
||||||
|
Namelen uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
88
vendor/bazil.org/fuse/fuse_kernel_darwin.go
generated
vendored
Normal file
88
vendor/bazil.org/fuse/fuse_kernel_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,88 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
Crtime_ uint64 // OS X only
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
CrtimeNsec uint32 // OS X only
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Flags_ uint32 // OS X only; see chflags(2)
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
a.Crtime_, a.CrtimeNsec = s, ns
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
a.Flags_ = f
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Bkuptime_ uint64
|
||||||
|
Chgtime_ uint64
|
||||||
|
Crtime uint64
|
||||||
|
BkuptimeNsec uint32
|
||||||
|
ChgtimeNsec uint32
|
||||||
|
CrtimeNsec uint32
|
||||||
|
Flags_ uint32 // see chflags(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return in.Flags_
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Position uint32
|
||||||
|
Padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *getxattrIn) position() uint32 {
|
||||||
|
return g.Position
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Position uint32
|
||||||
|
Padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *setxattrIn) position() uint32 {
|
||||||
|
return s.Position
|
||||||
|
}
|
62
vendor/bazil.org/fuse/fuse_kernel_freebsd.go
generated
vendored
Normal file
62
vendor/bazil.org/fuse/fuse_kernel_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) Crtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
// ignored on freebsd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
// ignored on freebsd
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
}
|
70
vendor/bazil.org/fuse/fuse_kernel_linux.go
generated
vendored
Normal file
70
vendor/bazil.org/fuse/fuse_kernel_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) Crtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
// Ignored on Linux.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
// Ignored on Linux.
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
// on amd64, the 32-bit O_LARGEFILE flag is always seen;
|
||||||
|
// on i386, the flag probably depends on the app
|
||||||
|
// requesting, but in any case should be utterly
|
||||||
|
// uninteresting to us here; our kernel protocol messages
|
||||||
|
// are not directly related to the client app's kernel
|
||||||
|
// API/ABI
|
||||||
|
flags &^= 0x8000
|
||||||
|
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
}
|
1
vendor/bazil.org/fuse/fuse_kernel_std.go
generated
vendored
Normal file
1
vendor/bazil.org/fuse/fuse_kernel_std.go
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
package fuse
|
7
vendor/bazil.org/fuse/fuse_linux.go
generated
vendored
Normal file
7
vendor/bazil.org/fuse/fuse_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// Maximum file write size we are prepared to receive from the kernel.
|
||||||
|
//
|
||||||
|
// Linux 4.2.0 has been observed to cap this value at 128kB
|
||||||
|
// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages).
|
||||||
|
const maxWrite = 128 * 1024
|
20
vendor/bazil.org/fuse/fuseutil/fuseutil.go
generated
vendored
Normal file
20
vendor/bazil.org/fuse/fuseutil/fuseutil.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
package fuseutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bazil.org/fuse"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HandleRead handles a read request assuming that data is the entire file content.
|
||||||
|
// It adjusts the amount returned in resp according to req.Offset and req.Size.
|
||||||
|
func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) {
|
||||||
|
if req.Offset >= int64(len(data)) {
|
||||||
|
data = nil
|
||||||
|
} else {
|
||||||
|
data = data[req.Offset:]
|
||||||
|
}
|
||||||
|
if len(data) > req.Size {
|
||||||
|
data = data[:req.Size]
|
||||||
|
}
|
||||||
|
n := copy(resp.Data[:req.Size], data)
|
||||||
|
resp.Data = resp.Data[:n]
|
||||||
|
}
|
38
vendor/bazil.org/fuse/mount.go
generated
vendored
Normal file
38
vendor/bazil.org/fuse/mount.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrOSXFUSENotFound is returned from Mount when the OSXFUSE
|
||||||
|
// installation is not detected.
|
||||||
|
//
|
||||||
|
// Only happens on OS X. Make sure OSXFUSE is installed, or see
|
||||||
|
// OSXFUSELocations for customization.
|
||||||
|
ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE")
|
||||||
|
)
|
||||||
|
|
||||||
|
func neverIgnoreLine(line string) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(r)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
if ignore(line) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.Printf("%s: %s", prefix, line)
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Printf("%s, error reading: %v", prefix, err)
|
||||||
|
}
|
||||||
|
}
|
208
vendor/bazil.org/fuse/mount_darwin.go
generated
vendored
Normal file
208
vendor/bazil.org/fuse/mount_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,208 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errNoAvail = errors.New("no available fuse devices")
|
||||||
|
errNotLoaded = errors.New("osxfuse is not loaded")
|
||||||
|
)
|
||||||
|
|
||||||
|
func loadOSXFUSE(bin string) error {
|
||||||
|
cmd := exec.Command(bin)
|
||||||
|
cmd.Dir = "/"
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
err := cmd.Run()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func openOSXFUSEDev(devPrefix string) (*os.File, error) {
|
||||||
|
var f *os.File
|
||||||
|
var err error
|
||||||
|
for i := uint64(0); ; i++ {
|
||||||
|
path := devPrefix + strconv.FormatUint(i, 10)
|
||||||
|
f, err = os.OpenFile(path, os.O_RDWR, 0000)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
if i == 0 {
|
||||||
|
// not even the first device was found -> fuse is not loaded
|
||||||
|
return nil, errNotLoaded
|
||||||
|
}
|
||||||
|
|
||||||
|
// we've run out of kernel-provided devices
|
||||||
|
return nil, errNoAvail
|
||||||
|
}
|
||||||
|
|
||||||
|
if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY {
|
||||||
|
// try the next one
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) {
|
||||||
|
var noMountpointPrefix = helperName + `: `
|
||||||
|
const noMountpointSuffix = `: No such file or directory`
|
||||||
|
return func(line string) (ignore bool) {
|
||||||
|
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||||
|
// re-extract it from the error message in case some layer
|
||||||
|
// changed the path
|
||||||
|
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||||
|
err := &MountpointDoesNotExistError{
|
||||||
|
Path: mountpoint,
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case errCh <- err:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
// not the first error; fall back to logging it
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isBoringMountOSXFUSEError returns whether the Wait error is
|
||||||
|
// uninteresting; exit status 64 is.
|
||||||
|
func isBoringMountOSXFUSEError(err error) bool {
|
||||||
|
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||||
|
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error {
|
||||||
|
for k, v := range conf.options {
|
||||||
|
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||||
|
// Silly limitation but the mount helper does not
|
||||||
|
// understand any escaping. See TestMountOptionCommaError.
|
||||||
|
return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmd := exec.Command(
|
||||||
|
bin,
|
||||||
|
"-o", conf.getOptions(),
|
||||||
|
// Tell osxfuse-kext how large our buffer is. It must split
|
||||||
|
// writes larger than this into multiple writes.
|
||||||
|
//
|
||||||
|
// OSXFUSE seems to ignore InitResponse.MaxWrite, and uses
|
||||||
|
// this instead.
|
||||||
|
"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
|
||||||
|
// refers to fd passed in cmd.ExtraFiles
|
||||||
|
"3",
|
||||||
|
dir,
|
||||||
|
)
|
||||||
|
cmd.ExtraFiles = []*os.File{f}
|
||||||
|
cmd.Env = os.Environ()
|
||||||
|
// OSXFUSE <3.3.0
|
||||||
|
cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
|
||||||
|
// OSXFUSE >=3.3.0
|
||||||
|
cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")
|
||||||
|
|
||||||
|
daemon := os.Args[0]
|
||||||
|
if daemonVar != "" {
|
||||||
|
cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
|
||||||
|
}
|
||||||
|
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
stderr, err := cmd.StderrPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return fmt.Errorf("mount_osxfusefs: %v", err)
|
||||||
|
}
|
||||||
|
helperErrCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||||
|
helperName := path.Base(bin)
|
||||||
|
go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
|
||||||
|
wg.Wait()
|
||||||
|
if err := cmd.Wait(); err != nil {
|
||||||
|
// see if we have a better error to report
|
||||||
|
select {
|
||||||
|
case helperErr := <-helperErrCh:
|
||||||
|
// log the Wait error if it's not what we expected
|
||||||
|
if !isBoringMountOSXFUSEError(err) {
|
||||||
|
log.Printf("mount helper failed: %v", err)
|
||||||
|
}
|
||||||
|
// and now return what we grabbed from stderr as the real
|
||||||
|
// error
|
||||||
|
*errp = helperErr
|
||||||
|
close(ready)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// nope, fall back to generic message
|
||||||
|
}
|
||||||
|
|
||||||
|
*errp = fmt.Errorf("mount_osxfusefs: %v", err)
|
||||||
|
close(ready)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
*errp = nil
|
||||||
|
close(ready)
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||||
|
locations := conf.osxfuseLocations
|
||||||
|
if locations == nil {
|
||||||
|
locations = []OSXFUSEPaths{
|
||||||
|
OSXFUSELocationV3,
|
||||||
|
OSXFUSELocationV2,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, loc := range locations {
|
||||||
|
if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
|
||||||
|
// try the other locations
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := openOSXFUSEDev(loc.DevicePrefix)
|
||||||
|
if err == errNotLoaded {
|
||||||
|
err = loadOSXFUSE(loc.Load)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// try again
|
||||||
|
f, err = openOSXFUSEDev(loc.DevicePrefix)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
return nil, ErrOSXFUSENotFound
|
||||||
|
}
|
111
vendor/bazil.org/fuse/mount_freebsd.go
generated
vendored
Normal file
111
vendor/bazil.org/fuse/mount_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) {
|
||||||
|
return func(line string) (ignore bool) {
|
||||||
|
const (
|
||||||
|
noMountpointPrefix = `mount_fusefs: `
|
||||||
|
noMountpointSuffix = `: No such file or directory`
|
||||||
|
)
|
||||||
|
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||||
|
// re-extract it from the error message in case some layer
|
||||||
|
// changed the path
|
||||||
|
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||||
|
err := &MountpointDoesNotExistError{
|
||||||
|
Path: mountpoint,
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case errCh <- err:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
// not the first error; fall back to logging it
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isBoringMountFusefsError returns whether the Wait error is
|
||||||
|
// uninteresting; exit status 1 is.
|
||||||
|
func isBoringMountFusefsError(err error) bool {
|
||||||
|
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||||
|
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||||
|
for k, v := range conf.options {
|
||||||
|
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||||
|
// Silly limitation but the mount helper does not
|
||||||
|
// understand any escaping. See TestMountOptionCommaError.
|
||||||
|
return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
|
||||||
|
if err != nil {
|
||||||
|
*errp = err
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command(
|
||||||
|
"/sbin/mount_fusefs",
|
||||||
|
"--safe",
|
||||||
|
"-o", conf.getOptions(),
|
||||||
|
"3",
|
||||||
|
dir,
|
||||||
|
)
|
||||||
|
cmd.ExtraFiles = []*os.File{f}
|
||||||
|
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
stderr, err := cmd.StderrPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, fmt.Errorf("mount_fusefs: %v", err)
|
||||||
|
}
|
||||||
|
helperErrCh := make(chan error, 1)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||||
|
go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr)
|
||||||
|
wg.Wait()
|
||||||
|
if err := cmd.Wait(); err != nil {
|
||||||
|
// see if we have a better error to report
|
||||||
|
select {
|
||||||
|
case helperErr := <-helperErrCh:
|
||||||
|
// log the Wait error if it's not what we expected
|
||||||
|
if !isBoringMountFusefsError(err) {
|
||||||
|
log.Printf("mount helper failed: %v", err)
|
||||||
|
}
|
||||||
|
// and now return what we grabbed from stderr as the real
|
||||||
|
// error
|
||||||
|
return nil, helperErr
|
||||||
|
default:
|
||||||
|
// nope, fall back to generic message
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("mount_fusefs: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
close(ready)
|
||||||
|
return f, nil
|
||||||
|
}
|
150
vendor/bazil.org/fuse/mount_linux.go
generated
vendored
Normal file
150
vendor/bazil.org/fuse/mount_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
|
||||||
|
return func(line string) (ignore bool) {
|
||||||
|
if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
|
||||||
|
// Silence this particular message, it occurs way too
|
||||||
|
// commonly and isn't very relevant to whether the mount
|
||||||
|
// succeeds or not.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
noMountpointPrefix = `fusermount: failed to access mountpoint `
|
||||||
|
noMountpointSuffix = `: No such file or directory`
|
||||||
|
)
|
||||||
|
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||||
|
// re-extract it from the error message in case some layer
|
||||||
|
// changed the path
|
||||||
|
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||||
|
err := &MountpointDoesNotExistError{
|
||||||
|
Path: mountpoint,
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case errCh <- err:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
// not the first error; fall back to logging it
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isBoringFusermountError returns whether the Wait error is
|
||||||
|
// uninteresting; exit status 1 is.
|
||||||
|
func isBoringFusermountError(err error) bool {
|
||||||
|
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||||
|
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
|
||||||
|
// linux mount is never delayed
|
||||||
|
close(ready)
|
||||||
|
|
||||||
|
fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("socketpair error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
|
||||||
|
defer writeFile.Close()
|
||||||
|
|
||||||
|
readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
|
||||||
|
defer readFile.Close()
|
||||||
|
|
||||||
|
cmd := exec.Command(
|
||||||
|
"fusermount",
|
||||||
|
"-o", conf.getOptions(),
|
||||||
|
"--",
|
||||||
|
dir,
|
||||||
|
)
|
||||||
|
cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")
|
||||||
|
|
||||||
|
cmd.ExtraFiles = []*os.File{writeFile}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
|
||||||
|
}
|
||||||
|
stderr, err := cmd.StderrPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, fmt.Errorf("fusermount: %v", err)
|
||||||
|
}
|
||||||
|
helperErrCh := make(chan error, 1)
|
||||||
|
wg.Add(2)
|
||||||
|
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||||
|
go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
|
||||||
|
wg.Wait()
|
||||||
|
if err := cmd.Wait(); err != nil {
|
||||||
|
// see if we have a better error to report
|
||||||
|
select {
|
||||||
|
case helperErr := <-helperErrCh:
|
||||||
|
// log the Wait error if it's not what we expected
|
||||||
|
if !isBoringFusermountError(err) {
|
||||||
|
log.Printf("mount helper failed: %v", err)
|
||||||
|
}
|
||||||
|
// and now return what we grabbed from stderr as the real
|
||||||
|
// error
|
||||||
|
return nil, helperErr
|
||||||
|
default:
|
||||||
|
// nope, fall back to generic message
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("fusermount: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := net.FileConn(readFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
|
||||||
|
}
|
||||||
|
defer c.Close()
|
||||||
|
|
||||||
|
uc, ok := c.(*net.UnixConn)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 32) // expect 1 byte
|
||||||
|
oob := make([]byte, 32) // expect 24 bytes
|
||||||
|
_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
|
||||||
|
scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
|
||||||
|
}
|
||||||
|
if len(scms) != 1 {
|
||||||
|
return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
|
||||||
|
}
|
||||||
|
scm := scms[0]
|
||||||
|
gotFds, err := syscall.ParseUnixRights(&scm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
|
||||||
|
}
|
||||||
|
if len(gotFds) != 1 {
|
||||||
|
return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
|
||||||
|
}
|
||||||
|
f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
|
||||||
|
return f, nil
|
||||||
|
}
|
310
vendor/bazil.org/fuse/options.go
generated
vendored
Normal file
310
vendor/bazil.org/fuse/options.go
generated
vendored
Normal file
|
@ -0,0 +1,310 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dummyOption(conf *mountConfig) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mountConfig holds the configuration for a mount operation.
|
||||||
|
// Use it by passing MountOption values to Mount.
|
||||||
|
type mountConfig struct {
|
||||||
|
options map[string]string
|
||||||
|
maxReadahead uint32
|
||||||
|
initFlags InitFlags
|
||||||
|
osxfuseLocations []OSXFUSEPaths
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeComma(s string) string {
|
||||||
|
s = strings.Replace(s, `\`, `\\`, -1)
|
||||||
|
s = strings.Replace(s, `,`, `\,`, -1)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// getOptions makes a string of options suitable for passing to FUSE
|
||||||
|
// mount flag `-o`. Returns an empty string if no options were set.
|
||||||
|
// Any platform specific adjustments should happen before the call.
|
||||||
|
func (m *mountConfig) getOptions() string {
|
||||||
|
var opts []string
|
||||||
|
for k, v := range m.options {
|
||||||
|
k = escapeComma(k)
|
||||||
|
if v != "" {
|
||||||
|
k += "=" + escapeComma(v)
|
||||||
|
}
|
||||||
|
opts = append(opts, k)
|
||||||
|
}
|
||||||
|
return strings.Join(opts, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
type mountOption func(*mountConfig) error
|
||||||
|
|
||||||
|
// MountOption is passed to Mount to change the behavior of the mount.
|
||||||
|
type MountOption mountOption
|
||||||
|
|
||||||
|
// FSName sets the file system name (also called source) that is
|
||||||
|
// visible in the list of mounted file systems.
|
||||||
|
//
|
||||||
|
// FreeBSD ignores this option.
|
||||||
|
func FSName(name string) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["fsname"] = name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subtype sets the subtype of the mount. The main type is always
|
||||||
|
// `fuse`. The type in a list of mounted file systems will look like
|
||||||
|
// `fuse.foo`.
|
||||||
|
//
|
||||||
|
// OS X ignores this option.
|
||||||
|
// FreeBSD ignores this option.
|
||||||
|
func Subtype(fstype string) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["subtype"] = fstype
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LocalVolume sets the volume to be local (instead of network),
|
||||||
|
// changing the behavior of Finder, Spotlight, and such.
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this option.
|
||||||
|
func LocalVolume() MountOption {
|
||||||
|
return localVolume
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumeName sets the volume name shown in Finder.
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this option.
|
||||||
|
func VolumeName(name string) MountOption {
|
||||||
|
return volumeName(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
|
||||||
|
// to store extended attributes on file systems that do not support
|
||||||
|
// them natively.
|
||||||
|
//
|
||||||
|
// Such file names are:
|
||||||
|
//
|
||||||
|
// ._*
|
||||||
|
// .DS_Store
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this option.
|
||||||
|
func NoAppleDouble() MountOption {
|
||||||
|
return noAppleDouble
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
|
||||||
|
// prefix "com.apple.". This disables persistent Finder state and
|
||||||
|
// other such information.
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this option.
|
||||||
|
func NoAppleXattr() MountOption {
|
||||||
|
return noAppleXattr
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
|
||||||
|
// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
|
||||||
|
//
|
||||||
|
// OSXFUSE expects all create calls to return EEXIST in case the file
|
||||||
|
// already exists, regardless of whether O_EXCL was specified or not.
|
||||||
|
// To ensure this behavior, it normally sets OpenExclusive for all
|
||||||
|
// Create calls, regardless of whether the original call had it set.
|
||||||
|
// For distributed filesystems, that may force every file create to be
|
||||||
|
// a distributed consensus action, causing undesirable delays.
|
||||||
|
//
|
||||||
|
// This option makes the FUSE filesystem see the original flag value,
|
||||||
|
// and better decide when to ensure global consensus.
|
||||||
|
//
|
||||||
|
// Note that returning EEXIST on existing file create is still
|
||||||
|
// expected with OSXFUSE, regardless of the presence of the
|
||||||
|
// OpenExclusive flag.
|
||||||
|
//
|
||||||
|
// For more information, see
|
||||||
|
// https://github.com/osxfuse/osxfuse/issues/209
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this options.
|
||||||
|
// Requires OSXFUSE 3.4.1 or newer.
|
||||||
|
func ExclCreate() MountOption {
|
||||||
|
return exclCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// DaemonTimeout sets the time in seconds between a request and a reply before
|
||||||
|
// the FUSE mount is declared dead.
|
||||||
|
//
|
||||||
|
// OS X and FreeBSD only. Others ignore this option.
|
||||||
|
func DaemonTimeout(name string) MountOption {
|
||||||
|
return daemonTimeout(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot")
|
||||||
|
|
||||||
|
// AllowOther allows other users to access the file system.
|
||||||
|
//
|
||||||
|
// Only one of AllowOther or AllowRoot can be used.
|
||||||
|
func AllowOther() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
if _, ok := conf.options["allow_root"]; ok {
|
||||||
|
return ErrCannotCombineAllowOtherAndAllowRoot
|
||||||
|
}
|
||||||
|
conf.options["allow_other"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowRoot allows other users to access the file system.
|
||||||
|
//
|
||||||
|
// Only one of AllowOther or AllowRoot can be used.
|
||||||
|
//
|
||||||
|
// FreeBSD ignores this option.
|
||||||
|
func AllowRoot() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
if _, ok := conf.options["allow_other"]; ok {
|
||||||
|
return ErrCannotCombineAllowOtherAndAllowRoot
|
||||||
|
}
|
||||||
|
conf.options["allow_root"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowDev enables interpreting character or block special devices on the
|
||||||
|
// filesystem.
|
||||||
|
func AllowDev() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["dev"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowSUID allows set-user-identifier or set-group-identifier bits to take
|
||||||
|
// effect.
|
||||||
|
func AllowSUID() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["suid"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultPermissions makes the kernel enforce access control based on
|
||||||
|
// the file mode (as in chmod).
|
||||||
|
//
|
||||||
|
// Without this option, the Node itself decides what is and is not
|
||||||
|
// allowed. This is normally ok because FUSE file systems cannot be
|
||||||
|
// accessed by other users without AllowOther/AllowRoot.
|
||||||
|
//
|
||||||
|
// FreeBSD ignores this option.
|
||||||
|
func DefaultPermissions() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["default_permissions"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadOnly makes the mount read-only.
|
||||||
|
func ReadOnly() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["ro"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxReadahead sets the number of bytes that can be prefetched for
|
||||||
|
// sequential reads. The kernel can enforce a maximum value lower than
|
||||||
|
// this.
|
||||||
|
//
|
||||||
|
// This setting makes the kernel perform speculative reads that do not
|
||||||
|
// originate from any client process. This usually tremendously
|
||||||
|
// improves read performance.
|
||||||
|
func MaxReadahead(n uint32) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.maxReadahead = n
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsyncRead enables multiple outstanding read requests for the same
|
||||||
|
// handle. Without this, there is at most one request in flight at a
|
||||||
|
// time.
|
||||||
|
func AsyncRead() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.initFlags |= InitAsyncRead
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WritebackCache enables the kernel to buffer writes before sending
|
||||||
|
// them to the FUSE server. Without this, writethrough caching is
|
||||||
|
// used.
|
||||||
|
func WritebackCache() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.initFlags |= InitWritebackCache
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OSXFUSEPaths describes the paths used by an installed OSXFUSE
// version. See OSXFUSELocationV3 for typical values.
type OSXFUSEPaths struct {
	// Prefix for the device file. At mount time, an incrementing
	// number is suffixed until a free FUSE device is found.
	DevicePrefix string
	// Path of the load helper, used to load the kernel extension if
	// no device files are found.
	Load string
	// Path of the mount helper, used for the actual mount operation.
	Mount string
	// Environment variable used to pass the path to the executable
	// calling the mount helper.
	DaemonVar string
}
|
||||||
|
|
||||||
|
// Default paths for OSXFUSE. See OSXFUSELocations.
var (
	// OSXFUSELocationV3 holds the install locations used by OSXFUSE 3.x.
	OSXFUSELocationV3 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
		Mount:        "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
		DaemonVar:    "MOUNT_OSXFUSE_DAEMON_PATH",
	}
	// OSXFUSELocationV2 holds the install locations used by the older
	// OSXFUSE 2.x ("osxfusefs") layout.
	OSXFUSELocationV2 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
		Mount:        "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
		DaemonVar:    "MOUNT_FUSEFS_DAEMON_PATH",
	}
)
|
||||||
|
|
||||||
|
// OSXFUSELocations sets where to look for OSXFUSE files. The
|
||||||
|
// arguments are all the possible locations. The previous locations
|
||||||
|
// are replaced.
|
||||||
|
//
|
||||||
|
// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
|
||||||
|
// used.
|
||||||
|
//
|
||||||
|
// OS X only. Others ignore this option.
|
||||||
|
func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return errors.New("must specify at least one location for OSXFUSELocations")
|
||||||
|
}
|
||||||
|
// replace previous values, but make a copy so there's no
|
||||||
|
// worries about caller mutating their slice
|
||||||
|
conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowNonEmptyMount allows the mounting over a non-empty directory.
|
||||||
|
//
|
||||||
|
// The files in it will be shadowed by the freshly created mount. By
|
||||||
|
// default these mounts are rejected to prevent accidental covering up
|
||||||
|
// of data, which could for example prevent automatic backup.
|
||||||
|
func AllowNonEmptyMount() MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["nonempty"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
35
vendor/bazil.org/fuse/options_darwin.go
generated
vendored
Normal file
35
vendor/bazil.org/fuse/options_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
func localVolume(conf *mountConfig) error {
|
||||||
|
conf.options["local"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func volumeName(name string) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["volname"] = name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func daemonTimeout(name string) MountOption {
|
||||||
|
return func(conf *mountConfig) error {
|
||||||
|
conf.options["daemon_timeout"] = name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func noAppleXattr(conf *mountConfig) error {
|
||||||
|
conf.options["noapplexattr"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func noAppleDouble(conf *mountConfig) error {
|
||||||
|
conf.options["noappledouble"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func exclCreate(conf *mountConfig) error {
|
||||||
|
conf.options["excl_create"] = ""
|
||||||
|
return nil
|
||||||
|
}
|
28
vendor/bazil.org/fuse/options_freebsd.go
generated
vendored
Normal file
28
vendor/bazil.org/fuse/options_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// localVolume is a no-op on FreeBSD; the "local" option is only
// meaningful on Darwin.
func localVolume(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// volumeName is ignored on FreeBSD; volume names are Darwin-only.
func volumeName(name string) MountOption {
	return dummyOption
}
|
||||||
|
|
||||||
|
// daemonTimeout sets the FreeBSD FUSE request timeout via the
// "timeout" mount option. The value is passed through verbatim.
func daemonTimeout(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["timeout"] = name
		return nil
	}
}
|
||||||
|
|
||||||
|
// noAppleXattr is a no-op on FreeBSD; it only matters on Darwin.
func noAppleXattr(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// noAppleDouble is a no-op on FreeBSD; it only matters on Darwin.
func noAppleDouble(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// exclCreate is a no-op on FreeBSD; it only matters on Darwin.
func exclCreate(conf *mountConfig) error {
	return nil
}
|
25
vendor/bazil.org/fuse/options_linux.go
generated
vendored
Normal file
25
vendor/bazil.org/fuse/options_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// localVolume is a no-op on Linux; the "local" option is Darwin-only.
func localVolume(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// volumeName is ignored on Linux; volume names are Darwin-only.
func volumeName(name string) MountOption {
	return dummyOption
}
|
||||||
|
|
||||||
|
// daemonTimeout is ignored on Linux; the kernel FUSE driver has no
// equivalent mount option here.
func daemonTimeout(name string) MountOption {
	return dummyOption
}
|
||||||
|
|
||||||
|
// noAppleXattr is a no-op on Linux; it only matters on Darwin.
func noAppleXattr(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// noAppleDouble is a no-op on Linux; it only matters on Darwin.
func noAppleDouble(conf *mountConfig) error {
	return nil
}
|
||||||
|
|
||||||
|
// exclCreate is a no-op on Linux; it only matters on Darwin.
func exclCreate(conf *mountConfig) error {
	return nil
}
|
75
vendor/bazil.org/fuse/protocol.go
generated
vendored
Normal file
75
vendor/bazil.org/fuse/protocol.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Protocol is a FUSE protocol version number.
type Protocol struct {
	Major uint32
	Minor uint32
}

// String formats the version as "major.minor".
func (p Protocol) String() string {
	return fmt.Sprintf("%d.%d", p.Major, p.Minor)
}

// LT returns whether p is less than b.
//
// Receivers renamed from the mixed p/a of the original so every
// method on Protocol uses the same receiver name, per Go convention.
func (p Protocol) LT(b Protocol) bool {
	return p.Major < b.Major ||
		(p.Major == b.Major && p.Minor < b.Minor)
}

// GE returns whether p is greater than or equal to b.
func (p Protocol) GE(b Protocol) bool {
	return p.Major > b.Major ||
		(p.Major == b.Major && p.Minor >= b.Minor)
}

// is79 reports whether the protocol is at least 7.9.
func (p Protocol) is79() bool {
	return p.GE(Protocol{7, 9})
}

// HasAttrBlockSize returns whether Attr.BlockSize is respected by the
// kernel.
func (p Protocol) HasAttrBlockSize() bool {
	return p.is79()
}

// HasReadWriteFlags returns whether ReadRequest/WriteRequest
// fields Flags and FileFlags are valid.
func (p Protocol) HasReadWriteFlags() bool {
	return p.is79()
}

// HasGetattrFlags returns whether GetattrRequest field Flags is
// valid.
func (p Protocol) HasGetattrFlags() bool {
	return p.is79()
}

// is710 reports whether the protocol is at least 7.10.
func (p Protocol) is710() bool {
	return p.GE(Protocol{7, 10})
}

// HasOpenNonSeekable returns whether OpenResponse field Flags flag
// OpenNonSeekable is supported.
func (p Protocol) HasOpenNonSeekable() bool {
	return p.is710()
}

// is712 reports whether the protocol is at least 7.12.
func (p Protocol) is712() bool {
	return p.GE(Protocol{7, 12})
}

// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest
// field Umask is valid.
func (p Protocol) HasUmask() bool {
	return p.is712()
}

// HasInvalidate returns whether InvalidateNode/InvalidateEntry are
// supported.
func (p Protocol) HasInvalidate() bool {
	return p.is712()
}
|
6
vendor/bazil.org/fuse/unmount.go
generated
vendored
Normal file
6
vendor/bazil.org/fuse/unmount.go
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
// Unmount tries to unmount the filesystem mounted at dir.
//
// It delegates to the platform-specific unmount implementation
// (fusermount on Linux, syscall.Unmount elsewhere).
func Unmount(dir string) error {
	return unmount(dir)
}
|
21
vendor/bazil.org/fuse/unmount_linux.go
generated
vendored
Normal file
21
vendor/bazil.org/fuse/unmount_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unmount unmounts dir by invoking fusermount -u, appending any
// output from the helper to the returned error for diagnosis.
func unmount(dir string) error {
	out, err := exec.Command("fusermount", "-u", dir).CombinedOutput()
	if err == nil {
		return nil
	}
	if len(out) > 0 {
		// Fold fusermount's own message into the error, minus the
		// trailing newline(s).
		trimmed := bytes.TrimRight(out, "\n")
		err = errors.New(err.Error() + ": " + string(trimmed))
	}
	return err
}
|
17
vendor/bazil.org/fuse/unmount_std.go
generated
vendored
Normal file
17
vendor/bazil.org/fuse/unmount_std.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unmount unmounts dir with the Unmount syscall, wrapping any failure
// in an *os.PathError for context.
func unmount(dir string) error {
	if err := syscall.Unmount(dir, 0); err != nil {
		return &os.PathError{Op: "unmount", Path: dir, Err: err}
	}
	return nil
}
|
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright 2014 Google Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
438
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
438
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
|
@ -0,0 +1,438 @@
|
||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metadata provides access to Google Compute Engine (GCE)
|
||||||
|
// metadata and API service accounts.
|
||||||
|
//
|
||||||
|
// This package is a wrapper around the GCE metadata service,
|
||||||
|
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||||
|
package metadata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/context/ctxhttp"
|
||||||
|
|
||||||
|
"cloud.google.com/go/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// metadataIP is the documented metadata server IP address.
	metadataIP = "169.254.169.254"

	// metadataHostEnv is the environment variable specifying the
	// GCE metadata hostname.  If empty, the default value of
	// metadataIP ("169.254.169.254") is used instead.
	// This variable name is not defined by any spec, as far as
	// I know; it was made up for the Go package.
	metadataHostEnv = "GCE_METADATA_HOST"
)
|
||||||
|
|
||||||
|
// cachedValue memoizes a single metadata value, fetching it at most
// once (per successful fetch) under mu.
type cachedValue struct {
	k    string // metadata path suffix to fetch, e.g. "project/project-id"
	trim bool   // whether to strip surrounding whitespace from the value
	mu   sync.Mutex
	v    string // cached value; empty means not yet fetched
}
|
||||||
|
|
||||||
|
// Package-level caches for the commonly requested identity values.
var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)
|
||||||
|
|
||||||
|
var (
	// metaClient is used for one-shot metadata requests. Both the
	// dial and response-header timeouts are short (2s) so probes of
	// a blackholed metadata IP fail fast.
	metaClient = &http.Client{
		Transport: &internal.Transport{
			Base: &http.Transport{
				Dial: (&net.Dialer{
					Timeout:   2 * time.Second,
					KeepAlive: 30 * time.Second,
				}).Dial,
				ResponseHeaderTimeout: 2 * time.Second,
			},
		},
	}
	// subscribeClient is like metaClient but deliberately omits
	// ResponseHeaderTimeout — presumably because subscriptions use
	// long-polling requests whose headers may arrive much later;
	// confirm against the Subscribe implementation.
	subscribeClient = &http.Client{
		Transport: &internal.Transport{
			Base: &http.Transport{
				Dial: (&net.Dialer{
					Timeout:   2 * time.Second,
					KeepAlive: 30 * time.Second,
				}).Dial,
			},
		},
	}
)
|
||||||
|
|
||||||
|
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

// Error implements the error interface, quoting the missing suffix.
func (e NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(e))
}
|
||||||
|
|
||||||
|
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
	// The ETag is fetched but discarded; callers needing it use
	// getETag directly.
	val, _, err := getETag(metaClient, suffix)
	return val, err
}
|
||||||
|
|
||||||
|
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
//
// A 404 maps to NotDefinedError(suffix); any other non-200 status is
// reported as a generic error.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	// Error ignored: the URL is built from constants plus suffix, so
	// NewRequest cannot fail for well-formed suffixes.
	req, _ := http.NewRequest("GET", url, nil)
	// Required by the metadata service per the GCE metadata docs;
	// requests without this header are rejected.
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}
|
||||||
|
|
||||||
|
func getTrimmed(suffix string) (s string, err error) {
|
||||||
|
s, err = Get(suffix)
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cachedValue) get() (v string, err error) {
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.v != "" {
|
||||||
|
return c.v, nil
|
||||||
|
}
|
||||||
|
if c.trim {
|
||||||
|
v, err = getTrimmed(c.k)
|
||||||
|
} else {
|
||||||
|
v, err = Get(c.k)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
c.v = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// onGCE caches the result of testOnGCE; onGCEOnce guards the single
// evaluation.
var (
	onGCEOnce sync.Once
	onGCE     bool
)
|
||||||
|
|
||||||
|
// OnGCE reports whether this process is running on Google Compute Engine.
//
// The detection runs at most once per process; subsequent calls
// return the cached result.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}
|
||||||
|
|
||||||
|
// initOnGCE populates the onGCE cache; invoked exactly once via
// onGCEOnce.
func initOnGCE() {
	onGCE = testOnGCE()
}
|
||||||
|
|
||||||
|
// testOnGCE probes whether this process is running on GCE. It races
// an HTTP probe of the metadata IP against a DNS lookup of
// metadata.google.internal, waiting longer for a positive answer when
// local system info already suggests GCE hardware.
func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	// Cancelling the context tears down any probe still in flight
	// once we have an answer.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffered to 2 so both probe goroutines can send and exit even
	// if only one result is ever read.
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
	go func() {
		// Strategy 1: hit the metadata IP directly and check the
		// identifying response header.
		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		// Strategy 2: resolve the metadata hostname and check it maps
		// to the documented metadata IP.
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}
|
||||||
|
|
||||||
|
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	// On GCE the firmware exposes a telltale DMI product name; a read
	// failure just yields an empty string, which matches no case below.
	data, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	switch strings.TrimSpace(string(data)) {
	case "Google", "Google Compute Engine":
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := getETag(subscribeClient, suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	// Ask the server to long-poll: it responds only once the value's ETag
	// differs from the one we pass in.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			// Value was deleted: report it to fn once, then stop.
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
// The value is cached after the first successful fetch (see cachedValue.get).
func ProjectID() (string, error) { return projID.get() }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
// The value is cached after the first successful fetch (see cachedValue.get).
func NumericProjectID() (string, error) { return projNum.get() }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address,
// read from the first network interface's metadata entry.
func InternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/ip")
}
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address,
// read from the first access config of the first network interface.
func ExternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal", read from the instance/hostname
// metadata entry.
func Hostname() (string, error) {
	return getTrimmed("instance/hostname")
}
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func InstanceTags() ([]string, error) {
|
||||||
|
var s []string
|
||||||
|
j, err := Get("instance/tags")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
// The value is cached after the first successful fetch (see cachedValue.get).
func InstanceID() (string, error) {
	return instID.get()
}
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func InstanceName() (string, error) {
|
||||||
|
host, err := Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.Split(host, ".")[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func Zone() (string, error) {
|
||||||
|
zone, err := getTrimmed("instance/zone")
|
||||||
|
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
||||||
|
|
||||||
|
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
||||||
|
|
||||||
|
func lines(suffix string) ([]string, error) {
|
||||||
|
j, err := Get(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||||
|
for i := range s {
|
||||||
|
s[i] = strings.TrimSpace(s[i])
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceAttributeValue returns the value of the provided VM
// instance attribute. Attribute names can be listed with
// InstanceAttributes.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
	return Get("instance/attributes/" + attr)
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue returns the value of the provided
// project attribute. Attribute names can be listed with
// ProjectAttributes.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
	return Get("project/attributes/" + attr)
}
|
||||||
|
|
||||||
|
// Scopes returns the service account scopes for the given account.
|
||||||
|
// The account may be empty or the string "default" to use the instance's
|
||||||
|
// main account.
|
||||||
|
func Scopes(serviceAccount string) ([]string, error) {
|
||||||
|
if serviceAccount == "" {
|
||||||
|
serviceAccount = "default"
|
||||||
|
}
|
||||||
|
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||||
|
}
|
||||||
|
|
||||||
|
// strsContains reports whether s is an element of ss.
func strsContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
|
64
vendor/cloud.google.com/go/internal/cloud.go
generated
vendored
Normal file
64
vendor/cloud.google.com/go/internal/cloud.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package internal provides support for the cloud packages.
|
||||||
|
//
|
||||||
|
// Users should not import this package directly.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// userAgent is the token Transport appends to outgoing requests'
// User-Agent headers.
const userAgent = "gcloud-golang/0.1"
|
||||||
|
|
||||||
|
// Transport is an http.RoundTripper that appends Google Cloud client's
// user-agent to the original request's user-agent header. The request
// (and its header map) is cloned before modification; see RoundTrip.
type Transport struct {
	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
	// Do User-Agent some other way.

	// Base is the actual http.RoundTripper
	// requests will use. It must not be nil.
	Base http.RoundTripper
}
|
||||||
|
|
||||||
|
// RoundTrip appends a user-agent to the existing user-agent
|
||||||
|
// header and delegates the request to the base http.RoundTripper.
|
||||||
|
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
req = cloneRequest(req)
|
||||||
|
ua := req.Header.Get("User-Agent")
|
||||||
|
if ua == "" {
|
||||||
|
ua = userAgent
|
||||||
|
} else {
|
||||||
|
ua = fmt.Sprintf("%s %s", ua, userAgent)
|
||||||
|
}
|
||||||
|
req.Header.Set("User-Agent", ua)
|
||||||
|
return t.Base.RoundTrip(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cloneRequest returns a clone of the provided *http.Request.
|
||||||
|
// The clone is a shallow copy of the struct and its Header map.
|
||||||
|
func cloneRequest(r *http.Request) *http.Request {
|
||||||
|
// shallow copy of the struct
|
||||||
|
r2 := new(http.Request)
|
||||||
|
*r2 = *r
|
||||||
|
// deep copy of the Header
|
||||||
|
r2.Header = make(http.Header)
|
||||||
|
for k, s := range r.Header {
|
||||||
|
r2.Header[k] = s
|
||||||
|
}
|
||||||
|
return r2
|
||||||
|
}
|
3
vendor/github.com/Unknwon/goconfig/.gitignore
generated
vendored
Normal file
3
vendor/github.com/Unknwon/goconfig/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
.DS_Store
|
||||||
|
*.iml
|
||||||
|
.idea
|
191
vendor/github.com/Unknwon/goconfig/LICENSE
generated
vendored
Normal file
191
vendor/github.com/Unknwon/goconfig/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
72
vendor/github.com/Unknwon/goconfig/README.md
generated
vendored
Normal file
72
vendor/github.com/Unknwon/goconfig/README.md
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig)
|
||||||
|
========
|
||||||
|
|
||||||
|
[中文文档](README_ZH.md)
|
||||||
|
|
||||||
|
**IMPORTANT**
|
||||||
|
|
||||||
|
- This library is under bug fix only mode, which means no more features will be added.
|
||||||
|
- I'm continuing working on better Go code with a different library: [ini](https://github.com/go-ini/ini).
|
||||||
|
|
||||||
|
## About
|
||||||
|
|
||||||
|
Package goconfig is an easy-to-use, comment-supporting configuration file parser for the Go Programming Language, which provides a structure similar to what you would find in Microsoft Windows INI files.
|
||||||
|
|
||||||
|
The configuration file consists of sections, led by a `[section]` header and followed by `name:value` or `name=value` entries. Note that leading whitespace is removed from values. The optional values can contain format strings which refer to other values in the same section, or values in a special DEFAULT section. Comments are indicated by ";" or "#"; comments may begin anywhere on a single line.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- It simplifies operation processes and is easy to use and understand; therefore, there are fewer chances of errors.
|
||||||
|
- It uses exactly the same way to access a configuration file as you use Windows APIs, so you don't need to change your code style.
|
||||||
|
- It supports read recursion sections.
|
||||||
|
- It supports auto increment of key.
|
||||||
|
- It supports **READ** and **WRITE** configuration file with comments each section or key which all the other parsers don't support!!!!!!!
|
||||||
|
- It supports get value through type bool, float64, int, int64 and string, methods that start with "Must" means ignore errors and get zero-value if error occurs, or you can specify a default value.
|
||||||
|
- It's able to load multiple files to overwrite key values.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
go get github.com/Unknwon/goconfig
|
||||||
|
|
||||||
|
Or
|
||||||
|
|
||||||
|
gopm get github.com/Unknwon/goconfig
|
||||||
|
|
||||||
|
## API Documentation
|
||||||
|
|
||||||
|
[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
Please see [conf.ini](testdata/conf.ini) as an example.
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
- Function `LoadConfigFile` load file(s) depends on your situation, and return a variable with type `ConfigFile`.
|
||||||
|
- `GetValue` gives basic functionality of getting a value of given section and key.
|
||||||
|
- Methods like `Bool`, `Int`, `Int64` return corresponding type of values.
|
||||||
|
- Methods start with `Must` return corresponding type of values and returns zero-value of given type if something goes wrong.
|
||||||
|
- `SetValue` sets value to given section and key, and inserts somewhere if it does not exist.
|
||||||
|
- `DeleteKey` deletes by given section and key.
|
||||||
|
- Finally, `SaveConfigFile` saves your configuration to local file system.
|
||||||
|
- Use method `Reload` in case someone else modified your file(s).
|
||||||
|
- Methods contains `Comment` help you manipulate comments.
|
||||||
|
- `LoadFromReader` allows loading data without an intermediate file.
|
||||||
|
- `SaveConfigData` added, which writes configuration to an arbitrary writer.
|
||||||
|
- `ReloadData` allows to reload data from memory.
|
||||||
|
|
||||||
|
Note that you cannot mix in-memory configuration with on-disk configuration.
|
||||||
|
|
||||||
|
## More Information
|
||||||
|
|
||||||
|
- All characters are CASE SENSITIVE, BE CAREFUL!
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
- [goconf](http://code.google.com/p/goconf/)
|
||||||
|
- [robfig/config](https://github.com/robfig/config)
|
||||||
|
- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
64
vendor/github.com/Unknwon/goconfig/README_ZH.md
generated
vendored
Normal file
64
vendor/github.com/Unknwon/goconfig/README_ZH.md
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig)
|
||||||
|
========
|
||||||
|
|
||||||
|
本库已被 [《Go名库讲解》](https://github.com/Unknwon/go-rock-libraries-showcases/tree/master/lectures/01-goconfig) 收录讲解,欢迎前往学习如何使用!
|
||||||
|
|
||||||
|
编码规范:基于 [Go 编码规范](https://github.com/Unknwon/go-code-convention)
|
||||||
|
|
||||||
|
## 关于
|
||||||
|
|
||||||
|
包 goconfig 是一个易于使用,支持注释的 Go 语言配置文件解析器,该文件的书写格式和 Windows 下的 INI 文件一样。
|
||||||
|
|
||||||
|
配置文件由形为 `[section]` 的节构成,内部使用 `name:value` 或 `name=value` 这样的键值对;每行开头和尾部的空白符号都将被忽略;如果未指定任何节,则会默认放入名为 `DEFAULT` 的节当中;可以使用 “;” 或 “#” 来作为注释的开头,并可以放置于任意的单独一行中。
|
||||||
|
|
||||||
|
## 特性
|
||||||
|
|
||||||
|
- 简化流程,易于理解,更少出错。
|
||||||
|
- 提供与 Windows API 一模一样的操作方式。
|
||||||
|
- 支持读取递归节。
|
||||||
|
- 支持自增键名。
|
||||||
|
- 支持对注释的 **读** 和 **写** 操作,其它所有解析器都不支持!!!!
|
||||||
|
- 可以直接返回 bool, float64, int, int64 和 string 类型的值,如果使用 “Must” 开头的方法,则一定会返回这个类型的一个值而不返回错误,如果错误发生则会返回零值。
|
||||||
|
- 支持加载多个文件来重写值。
|
||||||
|
|
||||||
|
## 安装
|
||||||
|
|
||||||
|
go get github.com/Unknwon/goconfig
|
||||||
|
|
||||||
|
或
|
||||||
|
|
||||||
|
gopm get github.com/Unknwon/goconfig
|
||||||
|
|
||||||
|
|
||||||
|
## API 文档
|
||||||
|
|
||||||
|
[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
|
||||||
|
|
||||||
|
## 示例
|
||||||
|
|
||||||
|
请查看 [conf.ini](testdata/conf.ini) 文件作为使用示例。
|
||||||
|
|
||||||
|
### 用例
|
||||||
|
|
||||||
|
- 函数 `LoadConfigFile` 加载一个或多个文件,然后返回一个类型为 `ConfigFile` 的变量。
|
||||||
|
- `GetValue` 可以简单的获取某个值。
|
||||||
|
- 像 `Bool`、`Int`、`Int64` 这样的方法会直接返回指定类型的值。
|
||||||
|
- 以 `Must` 开头的方法不会返回错误,但当错误发生时会返回零值。
|
||||||
|
- `SetValue` 可以设置某个值。
|
||||||
|
- `DeleteKey` 可以删除某个键。
|
||||||
|
- 最后,`SaveConfigFile` 可以保持您的配置到本地文件系统。
|
||||||
|
- 使用方法 `Reload` 可以重载您的配置文件。
|
||||||
|
|
||||||
|
## 更多信息
|
||||||
|
|
||||||
|
- 所有字符都是大小写敏感的!
|
||||||
|
|
||||||
|
## 参考信息
|
||||||
|
|
||||||
|
- [goconf](http://code.google.com/p/goconf/)
|
||||||
|
- [robfig/config](https://github.com/robfig/config)
|
||||||
|
- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
|
||||||
|
|
||||||
|
## 授权许可
|
||||||
|
|
||||||
|
本项目采用 Apache v2 开源授权许可证,完整的授权说明已放置在 [LICENSE](LICENSE) 文件中。
|
536
vendor/github.com/Unknwon/goconfig/conf.go
generated
vendored
Normal file
536
vendor/github.com/Unknwon/goconfig/conf.go
generated
vendored
Normal file
|
@ -0,0 +1,536 @@
|
||||||
|
// Copyright 2013 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
// Package goconfig is a fully functional and comments-support configuration file(.ini) parser.
|
||||||
|
package goconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Default section name.
|
||||||
|
DEFAULT_SECTION = "DEFAULT"
|
||||||
|
// Maximum allowed depth when recursively substituing variable names.
|
||||||
|
_DEPTH_VALUES = 200
|
||||||
|
)
|
||||||
|
|
||||||
|
type ParseError int
|
||||||
|
|
||||||
|
const (
|
||||||
|
ERR_SECTION_NOT_FOUND ParseError = iota + 1
|
||||||
|
ERR_KEY_NOT_FOUND
|
||||||
|
ERR_BLANK_SECTION_NAME
|
||||||
|
ERR_COULD_NOT_PARSE
|
||||||
|
)
|
||||||
|
|
||||||
|
var LineBreak = "\n"
|
||||||
|
|
||||||
|
// Variable regexp pattern: %(variable)s
|
||||||
|
var varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
LineBreak = "\r\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A ConfigFile represents an INI format configuration file.
type ConfigFile struct {
	lock      sync.RWMutex                 // Guards the maps below; Go maps are not safe for concurrent use.
	fileNames []string                     // Supports multiple source files.
	data      map[string]map[string]string // Section -> key : value

	// Lists keep sections and keys in the same order as the file.
	sectionList []string            // Section name list.
	keyList     map[string][]string // Section -> key name list.

	sectionComments map[string]string            // Section comments.
	keyComments     map[string]map[string]string // Key comments.
	BlockMode       bool                         // Indicates whether to use the lock or not.
}
|
||||||
|
|
||||||
|
// newConfigFile creates an empty configuration representation.
|
||||||
|
func newConfigFile(fileNames []string) *ConfigFile {
|
||||||
|
c := new(ConfigFile)
|
||||||
|
c.fileNames = fileNames
|
||||||
|
c.data = make(map[string]map[string]string)
|
||||||
|
c.keyList = make(map[string][]string)
|
||||||
|
c.sectionComments = make(map[string]string)
|
||||||
|
c.keyComments = make(map[string]map[string]string)
|
||||||
|
c.BlockMode = true
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue adds a new section-key-value to the configuration.
|
||||||
|
// It returns true if the key and value were inserted,
|
||||||
|
// or returns false if the value was overwritten.
|
||||||
|
// If the section does not exist in advance, it will be created.
|
||||||
|
func (c *ConfigFile) SetValue(section, key, value string) bool {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
if len(key) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.BlockMode {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
// Execute add operation.
|
||||||
|
c.data[section] = make(map[string]string)
|
||||||
|
// Append section to list.
|
||||||
|
c.sectionList = append(c.sectionList, section)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if key exists.
|
||||||
|
_, ok := c.data[section][key]
|
||||||
|
c.data[section][key] = value
|
||||||
|
if !ok {
|
||||||
|
// If not exists, append to key list.
|
||||||
|
c.keyList[section] = append(c.keyList[section], key)
|
||||||
|
}
|
||||||
|
return !ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteKey deletes the key in given section.
|
||||||
|
// It returns true if the key was deleted,
|
||||||
|
// or returns false if the section or key didn't exist.
|
||||||
|
func (c *ConfigFile) DeleteKey(section, key string) bool {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if key exists.
|
||||||
|
if _, ok := c.data[section][key]; ok {
|
||||||
|
delete(c.data[section], key)
|
||||||
|
// Remove comments of key.
|
||||||
|
c.SetKeyComments(section, key, "")
|
||||||
|
// Get index of key.
|
||||||
|
i := 0
|
||||||
|
for _, keyName := range c.keyList[section] {
|
||||||
|
if keyName == key {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
// Remove from key list.
|
||||||
|
c.keyList[section] = append(c.keyList[section][:i], c.keyList[section][i+1:]...)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetValue returns the value of key available in the given section.
|
||||||
|
// If the value needs to be unfolded
|
||||||
|
// (see e.g. %(google)s example in the GoConfig_test.go),
|
||||||
|
// then String does this unfolding automatically, up to
|
||||||
|
// _DEPTH_VALUES number of iterations.
|
||||||
|
// It returns an error and empty string value if the section does not exist,
|
||||||
|
// or key does not exist in DEFAULT and current sections.
|
||||||
|
func (c *ConfigFile) GetValue(section, key string) (string, error) {
|
||||||
|
if c.BlockMode {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
// Section does not exist.
|
||||||
|
return "", getError{ERR_SECTION_NOT_FOUND, section}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section exists.
|
||||||
|
// Check if key exists or empty value.
|
||||||
|
value, ok := c.data[section][key]
|
||||||
|
if !ok {
|
||||||
|
// Check if it is a sub-section.
|
||||||
|
if i := strings.LastIndex(section, "."); i > -1 {
|
||||||
|
return c.GetValue(section[:i], key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return empty value.
|
||||||
|
return "", getError{ERR_KEY_NOT_FOUND, key}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key exists.
|
||||||
|
var i int
|
||||||
|
for i = 0; i < _DEPTH_VALUES; i++ {
|
||||||
|
vr := varPattern.FindString(value)
|
||||||
|
if len(vr) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take off leading '%(' and trailing ')s'.
|
||||||
|
noption := strings.TrimLeft(vr, "%(")
|
||||||
|
noption = strings.TrimRight(noption, ")s")
|
||||||
|
|
||||||
|
// Search variable in default section.
|
||||||
|
nvalue, err := c.GetValue(DEFAULT_SECTION, noption)
|
||||||
|
if err != nil && section != DEFAULT_SECTION {
|
||||||
|
// Search in the same section.
|
||||||
|
if _, ok := c.data[section][noption]; ok {
|
||||||
|
nvalue = c.data[section][noption]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Substitute by new value and take off leading '%(' and trailing ')s'.
|
||||||
|
value = strings.Replace(value, vr, nvalue, -1)
|
||||||
|
}
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bool returns bool type value.
|
||||||
|
func (c *ConfigFile) Bool(section, key string) (bool, error) {
|
||||||
|
value, err := c.GetValue(section, key)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return strconv.ParseBool(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 returns float64 type value.
|
||||||
|
func (c *ConfigFile) Float64(section, key string) (float64, error) {
|
||||||
|
value, err := c.GetValue(section, key)
|
||||||
|
if err != nil {
|
||||||
|
return 0.0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseFloat(value, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int returns int type value.
|
||||||
|
func (c *ConfigFile) Int(section, key string) (int, error) {
|
||||||
|
value, err := c.GetValue(section, key)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64 returns int64 type value.
|
||||||
|
func (c *ConfigFile) Int64(section, key string) (int64, error) {
|
||||||
|
value, err := c.GetValue(section, key)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseInt(value, 10, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustValue always returns value without error.
|
||||||
|
// It returns empty string if error occurs, or the default value if given.
|
||||||
|
func (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string {
|
||||||
|
val, err := c.GetValue(section, key)
|
||||||
|
if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
|
||||||
|
return defaultVal[0]
|
||||||
|
}
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustValue always returns value without error,
|
||||||
|
// It returns empty string if error occurs, or the default value if given,
|
||||||
|
// and a bool value indicates whether default value is returned.
|
||||||
|
func (c *ConfigFile) MustValueSet(section, key string, defaultVal ...string) (string, bool) {
|
||||||
|
val, err := c.GetValue(section, key)
|
||||||
|
if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
|
||||||
|
c.SetValue(section, key, defaultVal[0])
|
||||||
|
return defaultVal[0], true
|
||||||
|
}
|
||||||
|
return val, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustValueRange always returns value without error,
|
||||||
|
// it returns default value if error occurs or doesn't fit into range.
|
||||||
|
func (c *ConfigFile) MustValueRange(section, key, defaultVal string, candidates []string) string {
|
||||||
|
val, err := c.GetValue(section, key)
|
||||||
|
if err != nil || len(val) == 0 {
|
||||||
|
return defaultVal
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, cand := range candidates {
|
||||||
|
if val == cand {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return defaultVal
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustValueArray always returns value array without error,
|
||||||
|
// it returns empty array if error occurs, split by delimiter otherwise.
|
||||||
|
func (c *ConfigFile) MustValueArray(section, key, delim string) []string {
|
||||||
|
val, err := c.GetValue(section, key)
|
||||||
|
if err != nil || len(val) == 0 {
|
||||||
|
return []string{}
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := strings.Split(val, delim)
|
||||||
|
for i := range vals {
|
||||||
|
vals[i] = strings.TrimSpace(vals[i])
|
||||||
|
}
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustBool always returns value without error,
|
||||||
|
// it returns false if error occurs.
|
||||||
|
func (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool {
|
||||||
|
val, err := c.Bool(section, key)
|
||||||
|
if len(defaultVal) > 0 && err != nil {
|
||||||
|
return defaultVal[0]
|
||||||
|
}
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustFloat64 always returns value without error,
|
||||||
|
// it returns 0.0 if error occurs.
|
||||||
|
func (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 {
|
||||||
|
value, err := c.Float64(section, key)
|
||||||
|
if len(defaultVal) > 0 && err != nil {
|
||||||
|
return defaultVal[0]
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustInt always returns value without error,
|
||||||
|
// it returns 0 if error occurs.
|
||||||
|
func (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int {
|
||||||
|
value, err := c.Int(section, key)
|
||||||
|
if len(defaultVal) > 0 && err != nil {
|
||||||
|
return defaultVal[0]
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustInt64 always returns value without error,
|
||||||
|
// it returns 0 if error occurs.
|
||||||
|
func (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 {
|
||||||
|
value, err := c.Int64(section, key)
|
||||||
|
if len(defaultVal) > 0 && err != nil {
|
||||||
|
return defaultVal[0]
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSectionList returns the list of all sections
|
||||||
|
// in the same order in the file.
|
||||||
|
func (c *ConfigFile) GetSectionList() []string {
|
||||||
|
list := make([]string, len(c.sectionList))
|
||||||
|
copy(list, c.sectionList)
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetKeyList returns the list of all keys in give section
|
||||||
|
// in the same order in the file.
|
||||||
|
// It returns nil if given section does not exist.
|
||||||
|
func (c *ConfigFile) GetKeyList(section string) []string {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Non-default section has a blank key as section keeper.
|
||||||
|
offset := 1
|
||||||
|
if section == DEFAULT_SECTION {
|
||||||
|
offset = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
list := make([]string, len(c.keyList[section])-offset)
|
||||||
|
copy(list, c.keyList[section][offset:])
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteSection deletes the entire section by given name.
|
||||||
|
// It returns true if the section was deleted, and false if the section didn't exist.
|
||||||
|
func (c *ConfigFile) DeleteSection(section string) bool {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(c.data, section)
|
||||||
|
// Remove comments of section.
|
||||||
|
c.SetSectionComments(section, "")
|
||||||
|
// Get index of section.
|
||||||
|
i := 0
|
||||||
|
for _, secName := range c.sectionList {
|
||||||
|
if secName == section {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
// Remove from section and key list.
|
||||||
|
c.sectionList = append(c.sectionList[:i], c.sectionList[i+1:]...)
|
||||||
|
delete(c.keyList, section)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSection returns key-value pairs in given section.
|
||||||
|
// It section does not exist, returns nil and error.
|
||||||
|
func (c *ConfigFile) GetSection(section string) (map[string]string, error) {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.data[section]; !ok {
|
||||||
|
// Section does not exist.
|
||||||
|
return nil, getError{ERR_SECTION_NOT_FOUND, section}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove pre-defined key.
|
||||||
|
secMap := c.data[section]
|
||||||
|
delete(c.data[section], " ")
|
||||||
|
|
||||||
|
// Section exists.
|
||||||
|
return secMap, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSectionComments adds new section comments to the configuration.
|
||||||
|
// If comments are empty(0 length), it will remove its section comments!
|
||||||
|
// It returns true if the comments were inserted or removed,
|
||||||
|
// or returns false if the comments were overwritten.
|
||||||
|
func (c *ConfigFile) SetSectionComments(section, comments string) bool {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(comments) == 0 {
|
||||||
|
if _, ok := c.sectionComments[section]; ok {
|
||||||
|
delete(c.sectionComments, section)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not exists can be seen as remove.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if comments exists.
|
||||||
|
_, ok := c.sectionComments[section]
|
||||||
|
if comments[0] != '#' && comments[0] != ';' {
|
||||||
|
comments = "; " + comments
|
||||||
|
}
|
||||||
|
c.sectionComments[section] = comments
|
||||||
|
return !ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKeyComments adds new section-key comments to the configuration.
|
||||||
|
// If comments are empty(0 length), it will remove its section-key comments!
|
||||||
|
// It returns true if the comments were inserted or removed,
|
||||||
|
// or returns false if the comments were overwritten.
|
||||||
|
// If the section does not exist in advance, it is created.
|
||||||
|
func (c *ConfigFile) SetKeyComments(section, key, comments string) bool {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if section exists.
|
||||||
|
if _, ok := c.keyComments[section]; ok {
|
||||||
|
if len(comments) == 0 {
|
||||||
|
if _, ok := c.keyComments[section][key]; ok {
|
||||||
|
delete(c.keyComments[section], key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not exists can be seen as remove.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(comments) == 0 {
|
||||||
|
// Not exists can be seen as remove.
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
// Execute add operation.
|
||||||
|
c.keyComments[section] = make(map[string]string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if key exists.
|
||||||
|
_, ok := c.keyComments[section][key]
|
||||||
|
if comments[0] != '#' && comments[0] != ';' {
|
||||||
|
comments = "; " + comments
|
||||||
|
}
|
||||||
|
c.keyComments[section][key] = comments
|
||||||
|
return !ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSectionComments returns the comments in the given section.
|
||||||
|
// It returns an empty string(0 length) if the comments do not exist.
|
||||||
|
func (c *ConfigFile) GetSectionComments(section string) (comments string) {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
return c.sectionComments[section]
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetKeyComments returns the comments of key in the given section.
|
||||||
|
// It returns an empty string(0 length) if the comments do not exist.
|
||||||
|
func (c *ConfigFile) GetKeyComments(section, key string) (comments string) {
|
||||||
|
// Blank section name represents DEFAULT section.
|
||||||
|
if len(section) == 0 {
|
||||||
|
section = DEFAULT_SECTION
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := c.keyComments[section]; ok {
|
||||||
|
return c.keyComments[section][key]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// getError occurs when get value in configuration file with invalid parameter.
|
||||||
|
type getError struct {
|
||||||
|
Reason ParseError
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements Error interface.
|
||||||
|
func (err getError) Error() string {
|
||||||
|
switch err.Reason {
|
||||||
|
case ERR_SECTION_NOT_FOUND:
|
||||||
|
return fmt.Sprintf("section '%s' not found", err.Name)
|
||||||
|
case ERR_KEY_NOT_FOUND:
|
||||||
|
return fmt.Sprintf("key '%s' not found", err.Name)
|
||||||
|
}
|
||||||
|
return "invalid get error"
|
||||||
|
}
|
294
vendor/github.com/Unknwon/goconfig/read.go
generated
vendored
Normal file
294
vendor/github.com/Unknwon/goconfig/read.go
generated
vendored
Normal file
|
@ -0,0 +1,294 @@
|
||||||
|
// Copyright 2013 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package goconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Read reads an io.Reader and returns a configuration representation.
|
||||||
|
// This representation can be queried with GetValue.
|
||||||
|
func (c *ConfigFile) read(reader io.Reader) (err error) {
|
||||||
|
buf := bufio.NewReader(reader)
|
||||||
|
|
||||||
|
// Handle BOM-UTF8.
|
||||||
|
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
|
||||||
|
mask, err := buf.Peek(3)
|
||||||
|
if err == nil && len(mask) >= 3 &&
|
||||||
|
mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
|
||||||
|
buf.Read(mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
count := 1 // Counter for auto increment.
|
||||||
|
// Current section name.
|
||||||
|
section := DEFAULT_SECTION
|
||||||
|
var comments string
|
||||||
|
// Parse line-by-line
|
||||||
|
for {
|
||||||
|
line, err := buf.ReadString('\n')
|
||||||
|
line = strings.TrimSpace(line)
|
||||||
|
lineLengh := len(line) //[SWH|+]
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reached end of file, if nothing to read then break,
|
||||||
|
// otherwise handle the last line.
|
||||||
|
if lineLengh == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// switch written for readability (not performance)
|
||||||
|
switch {
|
||||||
|
case lineLengh == 0: // Empty line
|
||||||
|
continue
|
||||||
|
case line[0] == '#' || line[0] == ';': // Comment
|
||||||
|
// Append comments
|
||||||
|
if len(comments) == 0 {
|
||||||
|
comments = line
|
||||||
|
} else {
|
||||||
|
comments += LineBreak + line
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case line[0] == '[' && line[lineLengh-1] == ']': // New sction.
|
||||||
|
// Get section name.
|
||||||
|
section = strings.TrimSpace(line[1 : lineLengh-1])
|
||||||
|
// Set section comments and empty if it has comments.
|
||||||
|
if len(comments) > 0 {
|
||||||
|
c.SetSectionComments(section, comments)
|
||||||
|
comments = ""
|
||||||
|
}
|
||||||
|
// Make section exist even though it does not have any key.
|
||||||
|
c.SetValue(section, " ", " ")
|
||||||
|
// Reset counter.
|
||||||
|
count = 1
|
||||||
|
continue
|
||||||
|
case section == "": // No section defined so far
|
||||||
|
return readError{ERR_BLANK_SECTION_NAME, line}
|
||||||
|
default: // Other alternatives
|
||||||
|
var (
|
||||||
|
i int
|
||||||
|
keyQuote string
|
||||||
|
key string
|
||||||
|
valQuote string
|
||||||
|
value string
|
||||||
|
)
|
||||||
|
//[SWH|+]:支持引号包围起来的字串
|
||||||
|
if line[0] == '"' {
|
||||||
|
if lineLengh >= 6 && line[0:3] == `"""` {
|
||||||
|
keyQuote = `"""`
|
||||||
|
} else {
|
||||||
|
keyQuote = `"`
|
||||||
|
}
|
||||||
|
} else if line[0] == '`' {
|
||||||
|
keyQuote = "`"
|
||||||
|
}
|
||||||
|
if keyQuote != "" {
|
||||||
|
qLen := len(keyQuote)
|
||||||
|
pos := strings.Index(line[qLen:], keyQuote)
|
||||||
|
if pos == -1 {
|
||||||
|
return readError{ERR_COULD_NOT_PARSE, line}
|
||||||
|
}
|
||||||
|
pos = pos + qLen
|
||||||
|
i = strings.IndexAny(line[pos:], "=:")
|
||||||
|
if i <= 0 {
|
||||||
|
return readError{ERR_COULD_NOT_PARSE, line}
|
||||||
|
}
|
||||||
|
i = i + pos
|
||||||
|
key = line[qLen:pos] //保留引号内的两端的空格
|
||||||
|
} else {
|
||||||
|
i = strings.IndexAny(line, "=:")
|
||||||
|
if i <= 0 {
|
||||||
|
return readError{ERR_COULD_NOT_PARSE, line}
|
||||||
|
}
|
||||||
|
key = strings.TrimSpace(line[0:i])
|
||||||
|
}
|
||||||
|
//[SWH|+];
|
||||||
|
|
||||||
|
// Check if it needs auto increment.
|
||||||
|
if key == "-" {
|
||||||
|
key = "#" + fmt.Sprint(count)
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
|
||||||
|
//[SWH|+]:支持引号包围起来的字串
|
||||||
|
lineRight := strings.TrimSpace(line[i+1:])
|
||||||
|
lineRightLength := len(lineRight)
|
||||||
|
firstChar := ""
|
||||||
|
if lineRightLength >= 2 {
|
||||||
|
firstChar = lineRight[0:1]
|
||||||
|
}
|
||||||
|
if firstChar == "`" {
|
||||||
|
valQuote = "`"
|
||||||
|
} else if lineRightLength >= 6 && lineRight[0:3] == `"""` {
|
||||||
|
valQuote = `"""`
|
||||||
|
}
|
||||||
|
if valQuote != "" {
|
||||||
|
qLen := len(valQuote)
|
||||||
|
pos := strings.LastIndex(lineRight[qLen:], valQuote)
|
||||||
|
if pos == -1 {
|
||||||
|
return readError{ERR_COULD_NOT_PARSE, line}
|
||||||
|
}
|
||||||
|
pos = pos + qLen
|
||||||
|
value = lineRight[qLen:pos]
|
||||||
|
} else {
|
||||||
|
value = strings.TrimSpace(lineRight[0:])
|
||||||
|
}
|
||||||
|
//[SWH|+];
|
||||||
|
|
||||||
|
c.SetValue(section, key, value)
|
||||||
|
// Set key comments and empty if it has comments.
|
||||||
|
if len(comments) > 0 {
|
||||||
|
c.SetKeyComments(section, key, comments)
|
||||||
|
comments = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reached end of file.
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromData accepts raw data directly from memory
|
||||||
|
// and returns a new configuration representation.
|
||||||
|
// Note that the configuration is written to the system
|
||||||
|
// temporary folder, so your file should not contain
|
||||||
|
// sensitive information.
|
||||||
|
func LoadFromData(data []byte) (c *ConfigFile, err error) {
|
||||||
|
// Save memory data to temporary file to support further operations.
|
||||||
|
tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().Nanosecond()))
|
||||||
|
if err = os.MkdirAll(path.Dir(tmpName), os.ModePerm); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = ioutil.WriteFile(tmpName, data, 0655); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
c = newConfigFile([]string{tmpName})
|
||||||
|
err = c.read(bytes.NewBuffer(data))
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromReader accepts raw data directly from a reader
|
||||||
|
// and returns a new configuration representation.
|
||||||
|
// You must use ReloadData to reload.
|
||||||
|
// You cannot append files a configfile read this way.
|
||||||
|
func LoadFromReader(in io.Reader) (c *ConfigFile, err error) {
|
||||||
|
c = newConfigFile([]string{""})
|
||||||
|
err = c.read(in)
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ConfigFile) loadFile(fileName string) (err error) {
|
||||||
|
f, err := os.Open(fileName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return c.read(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadConfigFile reads a file and returns a new configuration representation.
|
||||||
|
// This representation can be queried with GetValue.
|
||||||
|
func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err error) {
|
||||||
|
// Append files' name together.
|
||||||
|
fileNames := make([]string, 1, len(moreFiles)+1)
|
||||||
|
fileNames[0] = fileName
|
||||||
|
if len(moreFiles) > 0 {
|
||||||
|
fileNames = append(fileNames, moreFiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
c = newConfigFile(fileNames)
|
||||||
|
|
||||||
|
for _, name := range fileNames {
|
||||||
|
if err = c.loadFile(name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reload reloads configuration file in case it has changes.
|
||||||
|
func (c *ConfigFile) Reload() (err error) {
|
||||||
|
var cfg *ConfigFile
|
||||||
|
if len(c.fileNames) == 1 {
|
||||||
|
if c.fileNames[0] == "" {
|
||||||
|
return fmt.Errorf("file opened from in-memory data, use ReloadData to reload")
|
||||||
|
}
|
||||||
|
cfg, err = LoadConfigFile(c.fileNames[0])
|
||||||
|
} else {
|
||||||
|
cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
*c = *cfg
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReloadData reloads configuration file from memory
|
||||||
|
func (c *ConfigFile) ReloadData(in io.Reader) (err error) {
|
||||||
|
var cfg *ConfigFile
|
||||||
|
if len(c.fileNames) != 1 {
|
||||||
|
return fmt.Errorf("Multiple files loaded, unable to mix in-memory and file data")
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err = LoadFromReader(in)
|
||||||
|
if err == nil {
|
||||||
|
*c = *cfg
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendFiles appends more files to ConfigFile and reload automatically.
|
||||||
|
func (c *ConfigFile) AppendFiles(files ...string) error {
|
||||||
|
if len(c.fileNames) == 1 && c.fileNames[0] == "" {
|
||||||
|
return fmt.Errorf("Cannot append file data to in-memory data")
|
||||||
|
}
|
||||||
|
c.fileNames = append(c.fileNames, files...)
|
||||||
|
return c.Reload()
|
||||||
|
}
|
||||||
|
|
||||||
|
// readError occurs when read configuration file with wrong format.
|
||||||
|
type readError struct {
|
||||||
|
Reason ParseError
|
||||||
|
Content string // Line content
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implement Error interface.
|
||||||
|
func (err readError) Error() string {
|
||||||
|
switch err.Reason {
|
||||||
|
case ERR_BLANK_SECTION_NAME:
|
||||||
|
return "empty section name not allowed"
|
||||||
|
case ERR_COULD_NOT_PARSE:
|
||||||
|
return fmt.Sprintf("could not parse line: %s", string(err.Content))
|
||||||
|
}
|
||||||
|
return "invalid read error"
|
||||||
|
}
|
117
vendor/github.com/Unknwon/goconfig/write.go
generated
vendored
Normal file
117
vendor/github.com/Unknwon/goconfig/write.go
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
// Copyright 2013 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package goconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Write spaces around "=" to look better.
|
||||||
|
var PrettyFormat = true
|
||||||
|
|
||||||
|
// SaveConfigData writes configuration to a writer
|
||||||
|
func SaveConfigData(c *ConfigFile, out io.Writer) (err error) {
|
||||||
|
equalSign := "="
|
||||||
|
if PrettyFormat {
|
||||||
|
equalSign = " = "
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
for _, section := range c.sectionList {
|
||||||
|
// Write section comments.
|
||||||
|
if len(c.GetSectionComments(section)) > 0 {
|
||||||
|
if _, err = buf.WriteString(c.GetSectionComments(section) + LineBreak); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if section != DEFAULT_SECTION {
|
||||||
|
// Write section name.
|
||||||
|
if _, err = buf.WriteString("[" + section + "]" + LineBreak); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range c.keyList[section] {
|
||||||
|
if key != " " {
|
||||||
|
// Write key comments.
|
||||||
|
if len(c.GetKeyComments(section, key)) > 0 {
|
||||||
|
if _, err = buf.WriteString(c.GetKeyComments(section, key) + LineBreak); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
keyName := key
|
||||||
|
// Check if it's auto increment.
|
||||||
|
if keyName[0] == '#' {
|
||||||
|
keyName = "-"
|
||||||
|
}
|
||||||
|
//[SWH|+]:支持键名包含等号和冒号
|
||||||
|
if strings.Contains(keyName, `=`) || strings.Contains(keyName, `:`) {
|
||||||
|
if strings.Contains(keyName, "`") {
|
||||||
|
if strings.Contains(keyName, `"`) {
|
||||||
|
keyName = `"""` + keyName + `"""`
|
||||||
|
} else {
|
||||||
|
keyName = `"` + keyName + `"`
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
keyName = "`" + keyName + "`"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value := c.data[section][key]
|
||||||
|
// In case key value contains "`" or "\"".
|
||||||
|
if strings.Contains(value, "`") {
|
||||||
|
if strings.Contains(value, `"`) {
|
||||||
|
value = `"""` + value + `"""`
|
||||||
|
} else {
|
||||||
|
value = `"` + value + `"`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write key and value.
|
||||||
|
if _, err = buf.WriteString(keyName + equalSign + value + LineBreak); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put a line between sections.
|
||||||
|
if _, err = buf.WriteString(LineBreak); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := buf.WriteTo(out); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveConfigFile writes configuration file to local file system
|
||||||
|
func SaveConfigFile(c *ConfigFile, filename string) (err error) {
|
||||||
|
// Write configuration file by filename.
|
||||||
|
var f *os.File
|
||||||
|
if f, err = os.Create(filename); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := SaveConfigData(c, f); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return f.Close()
|
||||||
|
}
|
2
vendor/github.com/VividCortex/ewma/.gitignore
generated
vendored
Normal file
2
vendor/github.com/VividCortex/ewma/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
.DS_Store
|
||||||
|
.*.sw?
|
21
vendor/github.com/VividCortex/ewma/LICENSE
generated
vendored
Normal file
21
vendor/github.com/VividCortex/ewma/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2013 VividCortex
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
142
vendor/github.com/VividCortex/ewma/README.md
generated
vendored
Normal file
142
vendor/github.com/VividCortex/ewma/README.md
generated
vendored
Normal file
|
@ -0,0 +1,142 @@
|
||||||
|
# EWMA
|
||||||
|
|
||||||
|
This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
|
||||||
|
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).
|
||||||
|
|
||||||
|
![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523)
|
||||||
|
|
||||||
|
### Exponentially Weighted Moving Average
|
||||||
|
|
||||||
|
An exponentially weighted moving average is a way to continuously compute a type of
|
||||||
|
average for a series of numbers, as the numbers arrive. After a value in the series is
|
||||||
|
added to the average, its weight in the average decreases exponentially over time. This
|
||||||
|
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
|
||||||
|
their inexpensive computational and memory cost, as well as the fact that they represent
|
||||||
|
the recent central tendency of the series of values.
|
||||||
|
|
||||||
|
The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
|
||||||
|
is biased towards recent history. The alpha must be between 0 and 1, and is typically
|
||||||
|
a fairly small number, such as 0.04. We will discuss the choice of alpha later.
|
||||||
|
|
||||||
|
The algorithm works thus, in pseudocode:
|
||||||
|
|
||||||
|
1. Multiply the next number in the series by alpha.
|
||||||
|
2. Multiply the current value of the average by 1 minus alpha.
|
||||||
|
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
|
||||||
|
4. Repeat for each number in the series.
|
||||||
|
|
||||||
|
There are special-case behaviors for how to initialize the current value, and these vary
|
||||||
|
between implementations. One approach is to start with the first value in the series;
|
||||||
|
another is to average the first 10 or so values in the series using an arithmetic average,
|
||||||
|
and then begin the incremental updating of the average. Each method has pros and cons.
|
||||||
|
|
||||||
|
It may help to look at it pictorially. Suppose the series has five numbers, and we choose
|
||||||
|
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.
|
||||||
|
|
||||||
|
![Data Series](http://f.cl.ly/items/2W0I230b3b1B3p3o181O/data%20series.png)
|
||||||
|
|
||||||
|
Now let's take the moving average of those numbers. First we set the average to the value
|
||||||
|
of the first number.
|
||||||
|
|
||||||
|
![EWMA Step 1](http://f.cl.ly/items/003E0i1T1H2t373n3L3g/ewma-1.png)
|
||||||
|
|
||||||
|
Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
|
||||||
|
them to generate a new value.
|
||||||
|
|
||||||
|
![EWMA Step 2](http://f.cl.ly/items/2W2Z0b3J18122y1F3F2u/ewma-2.png)
|
||||||
|
|
||||||
|
This continues until we are done.
|
||||||
|
|
||||||
|
![EWMA Step N](http://f.cl.ly/items/0R3Y2V2o1t2Q1B082L3c/ewma.png)
|
||||||
|
|
||||||
|
Notice how each of the values in the series decays by half each time a new value
|
||||||
|
is added, and the top of the bars in the lower portion of the image represents the
|
||||||
|
size of the moving average. It is a smoothed, or low-pass, average of the original
|
||||||
|
series.
|
||||||
|
|
||||||
|
For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.
|
||||||
|
|
||||||
|
### Choosing Alpha
|
||||||
|
|
||||||
|
Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
|
||||||
|
that averages over the previous N samples. What is the average age of each sample? It is N/2.
|
||||||
|
|
||||||
|
Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
|
||||||
|
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
|
||||||
|
"Production and Operations Analysis" by Steven Nahmias.
|
||||||
|
|
||||||
|
So, for example, if you have a time-series with samples once per second, and you want to get the
|
||||||
|
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
|
||||||
|
is the constant alpha used for this repository's SimpleEWMA.
|
||||||
|
|
||||||
|
### Implementations
|
||||||
|
|
||||||
|
This repository contains two implementations of the EWMA algorithm, with different properties.
|
||||||
|
|
||||||
|
The implementations all conform to the MovingAverage interface, and the constructor returns
|
||||||
|
that type.
|
||||||
|
|
||||||
|
Current implementations assume an implicit time interval of 1.0 between every sample added.
|
||||||
|
That is, the passage of time is treated as though it's the same as the arrival of samples.
|
||||||
|
If you need time-based decay when samples are not arriving precisely at set intervals, then
|
||||||
|
this package will not support your needs at present.
|
||||||
|
|
||||||
|
#### SimpleEWMA
|
||||||
|
|
||||||
|
A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
|
||||||
|
for multiple reasons. It has no warm-up period and it uses a constant
|
||||||
|
decay. These properties let it use less memory. It will also behave
|
||||||
|
differently when it's equal to zero, which is assumed to mean
|
||||||
|
uninitialized, so if a value is likely to actually become zero over time,
|
||||||
|
then any non-zero value will cause a sharp jump instead of a small change.
|
||||||
|
|
||||||
|
#### VariableEWMA
|
||||||
|
|
||||||
|
Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
|
||||||
|
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
|
||||||
|
until you have added the required number of samples to it. It uses some memory to store the
|
||||||
|
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### API Documentation
|
||||||
|
|
||||||
|
View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
import "github.com/VividCortex/ewma"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
samples := [100]float64{
|
||||||
|
4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
|
||||||
|
}
|
||||||
|
|
||||||
|
e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
|
||||||
|
a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
|
||||||
|
|
||||||
|
for _, f := range samples {
|
||||||
|
e.Add(f)
|
||||||
|
a.Add(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Value() //=> 13.577404704631077
|
||||||
|
a.Value() //=> 1.5806140565521463e-12
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
We only accept pull requests for minor fixes or improvements. This includes:
|
||||||
|
|
||||||
|
* Small bug fixes
|
||||||
|
* Typos
|
||||||
|
* Documentation or comments
|
||||||
|
|
||||||
|
Please open issues to discuss new features. Pull requests for new features will be rejected,
|
||||||
|
so we recommend forking the repository and making changes in your fork for your use case.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
|
||||||
|
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
|
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
// Package ewma implements exponentially weighted moving averages.
|
||||||
|
package ewma
|
||||||
|
|
||||||
|
// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
|
||||||
|
// Please see the LICENSE file for applicable license terms.
|
||||||
|
|
||||||
|
// Tuning constants for the default (one-minute) moving average.
const (
	// AVG_METRIC_AGE is the average age of the metrics in the default
	// averaging period. We average over one minute, so the average age
	// of samples in that period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// DECAY is the decay factor derived from AVG_METRIC_AGE. The formula
	// for computing the decay factor from the average age comes from
	// "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// WARMUP_SAMPLES is the number of samples a VariableEWMA averages
	// arithmetically before it is "ready" to be queried. Seeding the moving
	// average with the mean of the first samples (per Nahmias) avoids
	// biasing it toward the very first values; this adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)
|
||||||
|
|
||||||
|
// MovingAverage computes a moving average over a time-series stream of
// numbers. Depending on the implementation, the average may be windowed or
// exponentially decaying.
type MovingAverage interface {
	// Add folds a new sample into the average.
	Add(float64)
	// Value reports the current value of the average.
	Value() float64
	// Set forces the average to a specific value.
	Set(float64)
}
|
||||||
|
|
||||||
|
// NewMovingAverage constructs a MovingAverage that computes an average with the
|
||||||
|
// desired characteristics in the moving window or exponential decay. If no
|
||||||
|
// age is given, it constructs a default exponentially weighted implementation
|
||||||
|
// that consumes minimal memory. The age is related to the decay factor alpha
|
||||||
|
// by the formula given for the DECAY constant. It signifies the average age
|
||||||
|
// of the samples as time goes to infinity.
|
||||||
|
func NewMovingAverage(age ...float64) MovingAverage {
|
||||||
|
if len(age) == 0 || age[0] == AVG_METRIC_AGE {
|
||||||
|
return new(SimpleEWMA)
|
||||||
|
}
|
||||||
|
return &VariableEWMA{
|
||||||
|
decay: 2 / (age[0] + 1),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A SimpleEWMA represents the exponentially weighted moving average of a
|
||||||
|
// series of numbers. It WILL have different behavior than the VariableEWMA
|
||||||
|
// for multiple reasons. It has no warm-up period and it uses a constant
|
||||||
|
// decay. These properties let it use less memory. It will also behave
|
||||||
|
// differently when it's equal to zero, which is assumed to mean
|
||||||
|
// uninitialized, so if a value is likely to actually become zero over time,
|
||||||
|
// then any non-zero value will cause a sharp jump instead of a small change.
|
||||||
|
// However, note that this takes a long time, and the value may just
|
||||||
|
// decays to a stable value that's close to zero, but which won't be mistaken
|
||||||
|
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
|
||||||
|
type SimpleEWMA struct {
|
||||||
|
// The current value of the average. After adding with Add(), this is
|
||||||
|
// updated to reflect the average of all values seen thus far.
|
||||||
|
value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a value to the series and updates the moving average.
|
||||||
|
func (e *SimpleEWMA) Add(value float64) {
|
||||||
|
if e.value == 0 { // this is a proxy for "uninitialized"
|
||||||
|
e.value = value
|
||||||
|
} else {
|
||||||
|
e.value = (value * DECAY) + (e.value * (1 - DECAY))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the current value of the moving average.
|
||||||
|
func (e *SimpleEWMA) Value() float64 {
|
||||||
|
return e.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the EWMA's value.
|
||||||
|
func (e *SimpleEWMA) Set(value float64) {
|
||||||
|
e.value = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// VariableEWMA represents the exponentially weighted moving average of a series of
|
||||||
|
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
|
||||||
|
type VariableEWMA struct {
|
||||||
|
// The multiplier factor by which the previous samples decay.
|
||||||
|
decay float64
|
||||||
|
// The current value of the average.
|
||||||
|
value float64
|
||||||
|
// The number of samples added to this instance.
|
||||||
|
count uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a value to the series and updates the moving average.
|
||||||
|
func (e *VariableEWMA) Add(value float64) {
|
||||||
|
switch {
|
||||||
|
case e.count < WARMUP_SAMPLES:
|
||||||
|
e.count++
|
||||||
|
e.value += value
|
||||||
|
case e.count == WARMUP_SAMPLES:
|
||||||
|
e.count++
|
||||||
|
e.value = e.value / float64(WARMUP_SAMPLES)
|
||||||
|
e.value = (value * e.decay) + (e.value * (1 - e.decay))
|
||||||
|
default:
|
||||||
|
e.value = (value * e.decay) + (e.value * (1 - e.decay))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the current value of the average, or 0.0 if the series hasn't
|
||||||
|
// warmed up yet.
|
||||||
|
func (e *VariableEWMA) Value() float64 {
|
||||||
|
if e.count <= WARMUP_SAMPLES {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the EWMA's value.
|
||||||
|
func (e *VariableEWMA) Set(value float64) {
|
||||||
|
e.value = value
|
||||||
|
if e.count <= WARMUP_SAMPLES {
|
||||||
|
e.count = WARMUP_SAMPLES + 1
|
||||||
|
}
|
||||||
|
}
|
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
Normal file
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
Normal file
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
AWS SDK for Go
|
||||||
|
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
Copyright 2014-2015 Stripe, Inc.
|
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
// Package awserr represents API error interface accessors for the SDK.
|
||||||
|
package awserr
|
||||||
|
|
||||||
|
// An Error wraps a lower level error with a code, a message, and the
// original error. The underlying concrete error type may also satisfy
// other interfaces which can be used to obtain more specific information
// about the error.
//
// Calling Error() or String() will always include the full information
// about an error based on its underlying type.
//
// Example:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if awsErr, ok := err.(awserr.Error); ok {
//             // Get error details
//             log.Println("Error:", awsErr.Code(), awsErr.Message())
//
//             // Prints out full error message, including original error if there was one.
//             log.Println("Error:", awsErr.Error())
//
//             // Get original error
//             if origErr := awsErr.OrigErr(); origErr != nil {
//                 // operate on original error.
//             }
//         } else {
//             fmt.Println(err.Error())
//         }
//     }
//
type Error interface {
	// Satisfy the generic error interface.
	error

	// Code returns the short phrase depicting the classification of the error.
	Code() string

	// Message returns the error details message.
	Message() string

	// OrigErr returns the original error if one was set; nil otherwise.
	OrigErr() error
}
|
||||||
|
|
||||||
|
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
	// Satisfy the generic error interface.
	error

	// Returns the short phrase depicting the classification of the error.
	Code() string

	// Returns the error details message.
	Message() string

	// Returns the original errors if any were set. Nil is returned if not set.
	OrigErrs() []error
}
|
||||||
|
|
||||||
|
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
	// Satisfy the base Error interface.
	Error

	// Returns the original errors if any were set. Nil is returned if not set.
	OrigErrs() []error
}
|
||||||
|
|
||||||
|
// New returns an Error object described by the code, message, and origErr.
|
||||||
|
//
|
||||||
|
// If origErr satisfies the Error interface it will not be wrapped within a new
|
||||||
|
// Error object and will instead be returned.
|
||||||
|
func New(code, message string, origErr error) Error {
|
||||||
|
var errs []error
|
||||||
|
if origErr != nil {
|
||||||
|
errs = append(errs, origErr)
|
||||||
|
}
|
||||||
|
return newBaseError(code, message, errs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBatchError returns an BatchedErrors with a collection of errors as an
|
||||||
|
// array of errors.
|
||||||
|
func NewBatchError(code, message string, errs []error) BatchedErrors {
|
||||||
|
return newBaseError(code, message, errs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
//	output, err := s3manage.Upload(svc, input, opts)
//	if err != nil {
//		if reqerr, ok := err.(RequestFailure); ok {
//			log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
//		} else {
//			log.Println("Error:", err.Error())
//		}
//	}
//
// Combined with awserr.Error:
//
//	output, err := s3manage.Upload(svc, input, opts)
//	if err != nil {
//		if awsErr, ok := err.(awserr.Error); ok {
//			// Generic AWS Error with Code, Message, and original error (if any)
//			fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
//			if reqErr, ok := err.(awserr.RequestFailure); ok {
//				// A service error occurred
//				fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
//			}
//		} else {
//			fmt.Println(err.Error())
//		}
//	}
type RequestFailure interface {
	Error

	// The status code of the HTTP response.
	StatusCode() int

	// The request ID returned by the service for a request failure. This will
	// be empty if no request ID is available such as the request failed due
	// to a connection error.
	RequestID() string
}
|
||||||
|
|
||||||
|
// NewRequestFailure returns a new request error wrapper for the given Error
|
||||||
|
// provided.
|
||||||
|
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
|
||||||
|
return newRequestError(err, statusCode, reqID)
|
||||||
|
}
|
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
|
@ -0,0 +1,194 @@
|
||||||
|
package awserr
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
func SprintError(code, message, extra string, origErr error) string {
	out := fmt.Sprintf("%s: %s", code, message)
	if extra != "" {
		out += "\n\t" + extra
	}
	if origErr != nil {
		out += "\ncaused by: " + origErr.Error()
	}
	return out
}
|
||||||
|
|
||||||
|
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
	// Classification of error
	code string

	// Detailed information about error
	message string

	// Optional original error this error is based off of. Allows building
	// chained errors.
	errs []error
}
|
||||||
|
|
||||||
|
// newBaseError returns an error object for the code, message, and errors.
|
||||||
|
//
|
||||||
|
// code is a short no whitespace phrase depicting the classification of
|
||||||
|
// the error that is being created.
|
||||||
|
//
|
||||||
|
// message is the free flow string containing detailed information about the
|
||||||
|
// error.
|
||||||
|
//
|
||||||
|
// origErrs is the error objects which will be nested under the new errors to
|
||||||
|
// be returned.
|
||||||
|
func newBaseError(code, message string, origErrs []error) *baseError {
|
||||||
|
b := &baseError{
|
||||||
|
code: code,
|
||||||
|
message: message,
|
||||||
|
errs: origErrs,
|
||||||
|
}
|
||||||
|
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string representation of the error.
|
||||||
|
//
|
||||||
|
// See ErrorWithExtra for formatting.
|
||||||
|
//
|
||||||
|
// Satisfies the error interface.
|
||||||
|
func (b baseError) Error() string {
|
||||||
|
size := len(b.errs)
|
||||||
|
if size > 0 {
|
||||||
|
return SprintError(b.code, b.message, "", errorList(b.errs))
|
||||||
|
}
|
||||||
|
|
||||||
|
return SprintError(b.code, b.message, "", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string representation of the error.
|
||||||
|
// Alias for Error to satisfy the stringer interface.
|
||||||
|
func (b baseError) String() string {
|
||||||
|
return b.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the short phrase depicting the classification of the error.
|
||||||
|
func (b baseError) Code() string {
|
||||||
|
return b.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the error details message.
|
||||||
|
func (b baseError) Message() string {
|
||||||
|
return b.message
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErr returns the original error if one was set. Nil is returned if no
|
||||||
|
// error was set. This only returns the first element in the list. If the full
|
||||||
|
// list is needed, use BatchedErrors.
|
||||||
|
func (b baseError) OrigErr() error {
|
||||||
|
switch len(b.errs) {
|
||||||
|
case 0:
|
||||||
|
return nil
|
||||||
|
case 1:
|
||||||
|
return b.errs[0]
|
||||||
|
default:
|
||||||
|
if err, ok := b.errs[0].(Error); ok {
|
||||||
|
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
|
||||||
|
}
|
||||||
|
return NewBatchError("BatchedErrors",
|
||||||
|
"multiple errors occurred", b.errs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErrs returns the original errors if one was set. An empty slice is
|
||||||
|
// returned if no error was set.
|
||||||
|
func (b baseError) OrigErrs() []error {
|
||||||
|
return b.errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// So that the Error interface type can be included as an anonymous field
|
||||||
|
// in the requestError struct and not conflict with the error.Error() method.
|
||||||
|
type awsError Error
|
||||||
|
|
||||||
|
// A requestError wraps a request or service error.
|
||||||
|
//
|
||||||
|
// Composed of baseError for code, message, and original error.
|
||||||
|
type requestError struct {
|
||||||
|
awsError
|
||||||
|
statusCode int
|
||||||
|
requestID string
|
||||||
|
}
|
||||||
|
|
||||||
|
// newRequestError returns a wrapped error with additional information for
|
||||||
|
// request status code, and service requestID.
|
||||||
|
//
|
||||||
|
// Should be used to wrap all request which involve service requests. Even if
|
||||||
|
// the request failed without a service response, but had an HTTP status code
|
||||||
|
// that may be meaningful.
|
||||||
|
//
|
||||||
|
// Also wraps original errors via the baseError.
|
||||||
|
func newRequestError(err Error, statusCode int, requestID string) *requestError {
|
||||||
|
return &requestError{
|
||||||
|
awsError: err,
|
||||||
|
statusCode: statusCode,
|
||||||
|
requestID: requestID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string representation of the error.
|
||||||
|
// Satisfies the error interface.
|
||||||
|
func (r requestError) Error() string {
|
||||||
|
extra := fmt.Sprintf("status code: %d, request id: %s",
|
||||||
|
r.statusCode, r.requestID)
|
||||||
|
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string representation of the error.
|
||||||
|
// Alias for Error to satisfy the stringer interface.
|
||||||
|
func (r requestError) String() string {
|
||||||
|
return r.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusCode returns the wrapped status code for the error
|
||||||
|
func (r requestError) StatusCode() int {
|
||||||
|
return r.statusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestID returns the wrapped requestID
|
||||||
|
func (r requestError) RequestID() string {
|
||||||
|
return r.requestID
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErrs returns the original errors if one was set. An empty slice is
|
||||||
|
// returned if no error was set.
|
||||||
|
func (r requestError) OrigErrs() []error {
|
||||||
|
if b, ok := r.awsError.(BatchedErrors); ok {
|
||||||
|
return b.OrigErrs()
|
||||||
|
}
|
||||||
|
return []error{r.OrigErr()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An error list that satisfies the golang interface
type errorList []error

// Error returns the string representation of the error.
//
// Joins the messages of all contained errors with a newline between each
// (no trailing newline); an empty list yields an empty string.
//
// Satisfies the error interface.
func (e errorList) Error() string {
	msg := ""
	// How do we want to handle the array size being zero
	if size := len(e); size > 0 {
		for i := 0; i < size; i++ {
			// Append the message directly; the previous
			// fmt.Sprintf("%s", ...) was a redundant format pass over a
			// value that is already a string.
			msg += e[i].Error()
			// We check the next index to see if it is within the slice.
			// If it is, then we append a newline. We do this, because unit tests
			// could be broken with the additional '\n'
			if i+1 < size {
				msg += "\n"
			}
		}
	}
	return msg
}
|
108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
|
@ -0,0 +1,108 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
	dstval := reflect.ValueOf(dst)
	if !dstval.IsValid() {
		panic("Copy dst cannot be nil")
	}

	rcopy(dstval, reflect.ValueOf(src), true)
}

// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
	dsti := reflect.New(reflect.TypeOf(src).Elem())
	dst = dsti.Interface()
	rcopy(dsti, reflect.ValueOf(src), true)
	return
}

// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
	if !src.IsValid() {
		return
	}

	switch src.Kind() {
	case reflect.Ptr:
		if _, ok := src.Interface().(io.Reader); ok {
			// io.Readers are shared by reference rather than deep-copied:
			// the pointer itself is assigned into dst.
			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
				dst.Elem().Set(src)
			} else if dst.CanSet() {
				dst.Set(src)
			}
		} else {
			e := src.Type().Elem()
			if dst.CanSet() && !src.IsNil() {
				if _, ok := src.Interface().(*time.Time); !ok {
					// Allocate a fresh element for dst to point at.
					dst.Set(reflect.New(e))
				} else {
					tempValue := reflect.New(e)
					tempValue.Elem().Set(src.Elem())
					// Sets time.Time's unexported values
					dst.Set(tempValue)
				}
			}
			if src.Elem().IsValid() {
				// Keep the current root state since the depth hasn't changed
				rcopy(dst.Elem(), src.Elem(), root)
			}
		}
	case reflect.Struct:
		// Copy field-by-field, matching fields by name; fields missing
		// from src or unsettable in dst are silently skipped.
		t := dst.Type()
		for i := 0; i < t.NumField(); i++ {
			name := t.Field(i).Name
			srcVal := src.FieldByName(name)
			dstVal := dst.FieldByName(name)
			if srcVal.IsValid() && dstVal.CanSet() {
				rcopy(dstVal, srcVal, false)
			}
		}
	case reflect.Slice:
		if src.IsNil() {
			break
		}

		// Fresh backing array so dst does not alias src's elements.
		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
		dst.Set(s)
		for i := 0; i < src.Len(); i++ {
			rcopy(dst.Index(i), src.Index(i), false)
		}
	case reflect.Map:
		if src.IsNil() {
			break
		}

		// Fresh map; each value is recursively copied before insertion.
		s := reflect.MakeMap(src.Type())
		dst.Set(s)
		for _, k := range src.MapKeys() {
			v := src.MapIndex(k)
			v2 := reflect.New(v.Type()).Elem()
			rcopy(v2, v, false)
			dst.SetMapIndex(k, v2)
		}
	default:
		// Assign the value if possible. If its not assignable, the value would
		// need to be converted and the impact of that may be unexpected, or is
		// not compatible with the dst type.
		if src.Type().AssignableTo(dst.Type()) {
			dst.Set(src)
		}
	}
}
|
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
Normal file
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
	va := reflect.Indirect(reflect.ValueOf(a))
	vb := reflect.Indirect(reflect.ValueOf(b))

	switch {
	case !va.IsValid() && !vb.IsValid():
		// Both nil: equal only when their static types also match
		// (a typed nil and an untyped nil are not equal).
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	case va.IsValid() != vb.IsValid():
		// Exactly one side is nil: never equal.
		return false
	}

	return reflect.DeepEqual(va.Interface(), vb.Interface())
}
|
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
|
@ -0,0 +1,222 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/jmespath/go-jmespath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// indexRe matches a trailing index expression on a path component, e.g.
// "Field[2]", "Field[-1]", or "Field[]" (wildcard).
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)

// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
//
// path is a dot-separated field path; "||" separates alternative paths
// tried in order until one yields values. createPath allocates nil
// pointers encountered along the path; caseSensitive rejects paths to
// unexported (lowercase) fields; nilTerm returns terminal pointers
// zeroed rather than dereferenced.
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
	pathparts := strings.Split(path, "||")
	if len(pathparts) > 1 {
		// Alternatives: first path that produces values wins.
		for _, pathpart := range pathparts {
			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
			if len(vals) > 0 {
				return vals
			}
		}
		return nil
	}

	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
	components := strings.Split(path, ".")
	for len(values) > 0 && len(components) > 0 {
		var index *int64
		var indexStar bool
		c := strings.TrimSpace(components[0])
		if c == "" { // no actual component, illegal syntax
			return nil
		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
			// TODO normalize case for user
			return nil // don't support unexported fields
		}

		// parse this component
		if m := indexRe.FindStringSubmatch(c); m != nil {
			c = m[1]
			if m[2] == "" {
				index = nil
				indexStar = true
			} else {
				i, _ := strconv.ParseInt(m[2], 10, 32)
				index = &i
				indexStar = false
			}
		}

		nextvals := []reflect.Value{}
		for _, value := range values {
			// pull component name out of struct member
			if value.Kind() != reflect.Struct {
				continue
			}

			if c == "*" { // pull all members
				for i := 0; i < value.NumField(); i++ {
					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
						nextvals = append(nextvals, f)
					}
				}
				continue
			}

			value = value.FieldByNameFunc(func(name string) bool {
				if c == name {
					return true
				}
				// strings.EqualFold avoids the double ToLower
				// allocation of the previous comparison and is the
				// idiomatic case-insensitive match.
				if !caseSensitive && strings.EqualFold(name, c) {
					return true
				}
				return false
			})

			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
				if !value.IsNil() {
					value.Set(reflect.Zero(value.Type()))
				}
				return []reflect.Value{value}
			}

			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
				// TODO if the value is the terminus it should not be created
				// if the value to be set to its position is nil.
				value.Set(reflect.New(value.Type().Elem()))
				value = value.Elem()
			} else {
				value = reflect.Indirect(value)
			}

			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
				if !createPath && value.IsNil() {
					value = reflect.ValueOf(nil)
				}
			}

			if value.IsValid() {
				nextvals = append(nextvals, value)
			}
		}
		values = nextvals

		if indexStar || index != nil {
			nextvals = []reflect.Value{}
			for _, valItem := range values {
				value := reflect.Indirect(valItem)
				if value.Kind() != reflect.Slice {
					continue
				}

				if indexStar { // grab all indices
					for i := 0; i < value.Len(); i++ {
						idx := reflect.Indirect(value.Index(i))
						if idx.IsValid() {
							nextvals = append(nextvals, idx)
						}
					}
					continue
				}

				// pull out index
				i := int(*index)
				if i >= value.Len() { // check out of bounds
					if createPath {
						// TODO resize slice
					} else {
						continue
					}
				} else if i < 0 { // support negative indexing
					i = value.Len() + i
				}
				value = reflect.Indirect(value.Index(i))

				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
					if !createPath && value.IsNil() {
						value = reflect.ValueOf(nil)
					}
				}

				if value.IsValid() {
					nextvals = append(nextvals, value)
				}
			}
			values = nextvals
		}

		components = components[1:]
	}
	return values
}
|
||||||
|
|
||||||
|
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
	// Path evaluation is delegated to JMESPath; the rest of this function
	// normalizes the search result into a flat []interface{}.
	result, err := jmespath.Search(path, i)
	if err != nil {
		return nil, err
	}

	v := reflect.ValueOf(result)
	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
		// No match (or a nil-pointer match): no values, no error.
		return nil, nil
	}
	if s, ok := result.([]interface{}); ok {
		// Already the desired shape; return as-is.
		return s, err
	}
	if v.Kind() == reflect.Map && v.Len() == 0 {
		return nil, nil
	}
	if v.Kind() == reflect.Slice {
		// Typed slice (e.g. []string): convert element-by-element.
		out := make([]interface{}, v.Len())
		for i := 0; i < v.Len(); i++ {
			out[i] = v.Index(i).Interface()
		}
		return out, nil
	}

	// Scalar result: wrap in a single-element slice.
	return []interface{}{result}, nil
}
|
||||||
|
|
||||||
|
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
||||||
|
// of a structure.
|
||||||
|
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
||||||
|
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
|
||||||
|
for _, rval := range rvals {
|
||||||
|
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setValue(rval, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// setValue assigns src into the reflect.Value dstVal, dereferencing dstVal
// first when it is a pointer.
//
// A nil src zeroes the destination. A non-nil pointer src is dereferenced
// before assignment; a nil pointer src also zeroes the destination.
func setValue(dstVal reflect.Value, src interface{}) {
	if dstVal.Kind() == reflect.Ptr {
		dstVal = reflect.Indirect(dstVal)
	}
	srcVal := reflect.ValueOf(src)

	if !srcVal.IsValid() { // src is literal nil
		// (A previously present empty `if dstVal.CanAddr()` branch holding
		// only commented-out code was dead and has been removed.)
		dstVal.Set(reflect.Zero(dstVal.Type()))
	} else if srcVal.Kind() == reflect.Ptr {
		if srcVal.IsNil() {
			srcVal = reflect.Zero(dstVal.Type())
		} else {
			// Dereference directly; re-boxing via reflect.ValueOf(src)
			// again was redundant.
			srcVal = srcVal.Elem()
		}
		dstVal.Set(srcVal)
	} else {
		dstVal.Set(srcVal)
	}
}
|
107
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
107
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
	var buf bytes.Buffer
	prettify(reflect.ValueOf(i), 0, &buf)
	return buf.String()
}

// prettify will recursively walk value v to build a textual
// representation of the value.
//
// indent is the current indentation depth in spaces; buf accumulates the
// output across recursive calls.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the value.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		strtype := v.Type().String()
		if strtype == "time.Time" {
			// Print timestamps inline rather than field-by-field.
			fmt.Fprintf(buf, "%s", v.Interface())
			break
		} else if strings.HasPrefix(strtype, "io.") {
			// Stream contents are not rendered.
			buf.WriteString("<buffer>")
			break
		}

		buf.WriteString("{\n")

		// Collect the printable field names: exported and non-nil only.
		names := []string{}
		for i := 0; i < v.Type().NumField(); i++ {
			name := v.Type().Field(i).Name
			f := v.Field(i)
			if name[0:1] == strings.ToLower(name[0:1]) {
				continue // ignore unexported fields
			}
			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
				continue // ignore unset fields
			}
			names = append(names, name)
		}

		for i, n := range names {
			val := v.FieldByName(n)
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(n + ": ")
			prettify(val, indent+2, buf)

			if i < len(names)-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		// Short slices print on one line; longer ones one element per line.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			prettify(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE(review): map iteration order is not sorted, so output for
		// multi-key maps is nondeterministic across runs.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			prettify(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		if !v.IsValid() {
			fmt.Fprint(buf, "<invalid value>")
			return
		}
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		case io.ReadSeeker, io.Reader:
			// Streams print as an opaque buffer address.
			format = "buffer(%p)"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
|
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
Normal file
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StringValue returns the string representation of a value.
func StringValue(i interface{}) string {
	var buf bytes.Buffer
	stringValue(reflect.ValueOf(i), 0, &buf)
	return buf.String()
}

// stringValue recursively walks v and writes its textual representation
// into buf, indenting nested levels by indent spaces. Unlike prettify it
// has no special handling for time.Time or io types.
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the value.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		buf.WriteString("{\n")

		// Collect printable field names: exported, non-nil ptr/slice only.
		names := []string{}
		for i := 0; i < v.Type().NumField(); i++ {
			name := v.Type().Field(i).Name
			f := v.Field(i)
			if name[0:1] == strings.ToLower(name[0:1]) {
				continue // ignore unexported fields
			}
			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
				continue // ignore unset fields
			}
			names = append(names, name)
		}

		for i, n := range names {
			val := v.FieldByName(n)
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(n + ": ")
			stringValue(val, indent+2, buf)

			if i < len(names)-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		// Short slices print on one line; longer ones one element per line.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			stringValue(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE(review): map iteration order is unsorted, so output for
		// multi-key maps is nondeterministic across runs.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			stringValue(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
|
139
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
Normal file
139
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
Normal file
|
@ -0,0 +1,139 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http/httputil"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Config provides configuration to a service client instance.
|
||||||
|
type Config struct {
|
||||||
|
Config *aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
Endpoint, SigningRegion string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigProvider provides a generic way for a service client to receive
|
||||||
|
// the ClientConfig without circular dependencies.
|
||||||
|
type ConfigProvider interface {
|
||||||
|
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client implements the base client request and response handling
|
||||||
|
// used by all service clients.
|
||||||
|
type Client struct {
|
||||||
|
request.Retryer
|
||||||
|
metadata.ClientInfo
|
||||||
|
|
||||||
|
Config aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
}
|
||||||
|
|
||||||
|
// New will return a pointer to a new initialized service client.
|
||||||
|
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
|
||||||
|
svc := &Client{
|
||||||
|
Config: cfg,
|
||||||
|
ClientInfo: info,
|
||||||
|
Handlers: handlers,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch retryer, ok := cfg.Retryer.(request.Retryer); {
|
||||||
|
case ok:
|
||||||
|
svc.Retryer = retryer
|
||||||
|
case cfg.Retryer != nil && cfg.Logger != nil:
|
||||||
|
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
|
||||||
|
cfg.Logger.Log(s)
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
maxRetries := aws.IntValue(cfg.MaxRetries)
|
||||||
|
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
||||||
|
maxRetries = 3
|
||||||
|
}
|
||||||
|
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
|
||||||
|
}
|
||||||
|
|
||||||
|
svc.AddDebugHandlers()
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(svc)
|
||||||
|
}
|
||||||
|
|
||||||
|
return svc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequest returns a new Request pointer for the service API
|
||||||
|
// operation and parameters.
|
||||||
|
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
||||||
|
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddDebugHandlers injects debug logging handlers into the service to log request
|
||||||
|
// debug information.
|
||||||
|
func (c *Client) AddDebugHandlers() {
|
||||||
|
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Handlers.Send.PushFront(logRequest)
|
||||||
|
c.Handlers.Send.PushBack(logResponse)
|
||||||
|
}
|
||||||
|
|
||||||
|
const logReqMsg = `DEBUG: Request %s/%s Details:
|
||||||
|
---[ REQUEST POST-SIGN ]-----------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
|
||||||
|
---[ REQUEST DUMP ERROR ]-----------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
func logRequest(r *request.Request) {
|
||||||
|
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||||
|
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||||
|
if err != nil {
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if logBody {
|
||||||
|
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
||||||
|
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
||||||
|
// client reader.
|
||||||
|
r.Body.Seek(r.BodyStart, 0)
|
||||||
|
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
|
||||||
|
}
|
||||||
|
|
||||||
|
const logRespMsg = `DEBUG: Response %s/%s Details:
|
||||||
|
---[ RESPONSE ]--------------------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
|
||||||
|
---[ RESPONSE DUMP ERROR ]-----------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
func logResponse(r *request.Request) {
|
||||||
|
var msg = "no response data"
|
||||||
|
if r.HTTPResponse != nil {
|
||||||
|
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||||
|
dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
|
||||||
|
if err != nil {
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
msg = string(dumpedBody)
|
||||||
|
} else if r.Error != nil {
|
||||||
|
msg = r.Error.Error()
|
||||||
|
}
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
|
||||||
|
}
|
90
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
Normal file
90
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultRetryer implements basic retry logic using exponential backoff for
|
||||||
|
// most services. If you want to implement custom retry logic, implement the
|
||||||
|
// request.Retryer interface or create a structure type that composes this
|
||||||
|
// struct and override the specific methods. For example, to override only
|
||||||
|
// the MaxRetries method:
|
||||||
|
//
|
||||||
|
// type retryer struct {
|
||||||
|
// service.DefaultRetryer
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // This implementation always has 100 max retries
|
||||||
|
// func (d retryer) MaxRetries() uint { return 100 }
|
||||||
|
type DefaultRetryer struct {
|
||||||
|
NumMaxRetries int
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxRetries returns the number of maximum returns the service will use to make
|
||||||
|
// an individual API request.
|
||||||
|
func (d DefaultRetryer) MaxRetries() int {
|
||||||
|
return d.NumMaxRetries
|
||||||
|
}
|
||||||
|
|
||||||
|
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
|
||||||
|
|
||||||
|
// RetryRules returns the delay duration before retrying this request again
|
||||||
|
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||||
|
// Set the upper limit of delay in retrying at ~five minutes
|
||||||
|
minTime := 30
|
||||||
|
throttle := d.shouldThrottle(r)
|
||||||
|
if throttle {
|
||||||
|
minTime = 500
|
||||||
|
}
|
||||||
|
|
||||||
|
retryCount := r.RetryCount
|
||||||
|
if retryCount > 13 {
|
||||||
|
retryCount = 13
|
||||||
|
} else if throttle && retryCount > 8 {
|
||||||
|
retryCount = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
|
||||||
|
return time.Duration(delay) * time.Millisecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShouldRetry returns true if the request should be retried.
|
||||||
|
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||||
|
if r.HTTPResponse.StatusCode >= 500 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShouldThrottle returns true if the request should be throttled.
|
||||||
|
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||||
|
if r.HTTPResponse.StatusCode == 502 ||
|
||||||
|
r.HTTPResponse.StatusCode == 503 ||
|
||||||
|
r.HTTPResponse.StatusCode == 504 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return r.IsErrorThrottle()
|
||||||
|
}
|
||||||
|
|
||||||
|
// lockedSource is a thread-safe implementation of rand.Source
|
||||||
|
type lockedSource struct {
|
||||||
|
lk sync.Mutex
|
||||||
|
src rand.Source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Int63() (n int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
n = r.src.Int63()
|
||||||
|
r.lk.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Seed(seed int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
r.src.Seed(seed)
|
||||||
|
r.lk.Unlock()
|
||||||
|
}
|
12
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
Normal file
12
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
package metadata
|
||||||
|
|
||||||
|
// ClientInfo wraps immutable data from the client.Client structure.
|
||||||
|
type ClientInfo struct {
|
||||||
|
ServiceName string
|
||||||
|
APIVersion string
|
||||||
|
Endpoint string
|
||||||
|
SigningName string
|
||||||
|
SigningRegion string
|
||||||
|
JSONVersion string
|
||||||
|
TargetPrefix string
|
||||||
|
}
|
422
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
422
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
|
@ -0,0 +1,422 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UseServiceDefaultRetries instructs the config to use the service's own
|
||||||
|
// default number of retries. This will be the default action if
|
||||||
|
// Config.MaxRetries is nil also.
|
||||||
|
const UseServiceDefaultRetries = -1
|
||||||
|
|
||||||
|
// RequestRetryer is an alias for a type that implements the request.Retryer
|
||||||
|
// interface.
|
||||||
|
type RequestRetryer interface{}
|
||||||
|
|
||||||
|
// A Config provides service configuration for service clients. By default,
|
||||||
|
// all clients will use the defaults.DefaultConfig tructure.
|
||||||
|
//
|
||||||
|
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||||
|
// // service clients.
|
||||||
|
// sess, err := session.NewSession(&aws.Config{
|
||||||
|
// MaxRetries: aws.Int(3),
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Create S3 service client with a specific Region.
|
||||||
|
// svc := s3.New(sess, &aws.Config{
|
||||||
|
// Region: aws.String("us-west-2"),
|
||||||
|
// })
|
||||||
|
type Config struct {
|
||||||
|
// Enables verbose error printing of all credential chain errors.
|
||||||
|
// Should be used when wanting to see all errors while attempting to
|
||||||
|
// retrieve credentials.
|
||||||
|
CredentialsChainVerboseErrors *bool
|
||||||
|
|
||||||
|
// The credentials object to use when signing requests. Defaults to a
|
||||||
|
// chain of credential providers to search for credentials in environment
|
||||||
|
// variables, shared credential file, and EC2 Instance Roles.
|
||||||
|
Credentials *credentials.Credentials
|
||||||
|
|
||||||
|
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||||
|
// that overrides the default generated endpoint for a client. Set this
|
||||||
|
// to `""` to use the default generated endpoint.
|
||||||
|
//
|
||||||
|
// @note You must still provide a `Region` value when specifying an
|
||||||
|
// endpoint for a client.
|
||||||
|
Endpoint *string
|
||||||
|
|
||||||
|
// The region to send requests to. This parameter is required and must
|
||||||
|
// be configured globally or on a per-client basis unless otherwise
|
||||||
|
// noted. A full list of regions is found in the "Regions and Endpoints"
|
||||||
|
// document.
|
||||||
|
//
|
||||||
|
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||||
|
// AWS Regions and Endpoints
|
||||||
|
Region *string
|
||||||
|
|
||||||
|
// Set this to `true` to disable SSL when sending requests. Defaults
|
||||||
|
// to `false`.
|
||||||
|
DisableSSL *bool
|
||||||
|
|
||||||
|
// The HTTP client to use when sending requests. Defaults to
|
||||||
|
// `http.DefaultClient`.
|
||||||
|
HTTPClient *http.Client
|
||||||
|
|
||||||
|
// An integer value representing the logging level. The default log level
|
||||||
|
// is zero (LogOff), which represents no logging. To enable logging set
|
||||||
|
// to a LogLevel Value.
|
||||||
|
LogLevel *LogLevelType
|
||||||
|
|
||||||
|
// The logger writer interface to write logging messages to. Defaults to
|
||||||
|
// standard out.
|
||||||
|
Logger Logger
|
||||||
|
|
||||||
|
// The maximum number of times that a request will be retried for failures.
|
||||||
|
// Defaults to -1, which defers the max retry setting to the service
|
||||||
|
// specific configuration.
|
||||||
|
MaxRetries *int
|
||||||
|
|
||||||
|
// Retryer guides how HTTP requests should be retried in case of
|
||||||
|
// recoverable failures.
|
||||||
|
//
|
||||||
|
// When nil or the value does not implement the request.Retryer interface,
|
||||||
|
// the request.DefaultRetryer will be used.
|
||||||
|
//
|
||||||
|
// When both Retryer and MaxRetries are non-nil, the former is used and
|
||||||
|
// the latter ignored.
|
||||||
|
//
|
||||||
|
// To set the Retryer field in a type-safe manner and with chaining, use
|
||||||
|
// the request.WithRetryer helper function:
|
||||||
|
//
|
||||||
|
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
|
||||||
|
//
|
||||||
|
Retryer RequestRetryer
|
||||||
|
|
||||||
|
// Disables semantic parameter validation, which validates input for
|
||||||
|
// missing required fields and/or other semantic request input errors.
|
||||||
|
DisableParamValidation *bool
|
||||||
|
|
||||||
|
// Disables the computation of request and response checksums, e.g.,
|
||||||
|
// CRC32 checksums in Amazon DynamoDB.
|
||||||
|
DisableComputeChecksums *bool
|
||||||
|
|
||||||
|
// Set this to `true` to force the request to use path-style addressing,
|
||||||
|
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
|
||||||
|
// will use virtual hosted bucket addressing when possible
|
||||||
|
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
||||||
|
//
|
||||||
|
// @note This configuration option is specific to the Amazon S3 service.
|
||||||
|
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
||||||
|
// Amazon S3: Virtual Hosting of Buckets
|
||||||
|
S3ForcePathStyle *bool
|
||||||
|
|
||||||
|
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
|
||||||
|
// header to PUT requests over 2MB of content. 100-Continue instructs the
|
||||||
|
// HTTP client not to send the body until the service responds with a
|
||||||
|
// `continue` status. This is useful to prevent sending the request body
|
||||||
|
// until after the request is authenticated, and validated.
|
||||||
|
//
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
||||||
|
//
|
||||||
|
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
|
||||||
|
// `ExpectContinueTimeout` for information on adjusting the continue wait
|
||||||
|
// timeout. https://golang.org/pkg/net/http/#Transport
|
||||||
|
//
|
||||||
|
// You should use this flag to disble 100-Continue if you experience issues
|
||||||
|
// with proxies or third party S3 compatible services.
|
||||||
|
S3Disable100Continue *bool
|
||||||
|
|
||||||
|
// Set this to `true` to enable S3 Accelerate feature. For all operations
|
||||||
|
// compatible with S3 Accelerate will use the accelerate endpoint for
|
||||||
|
// requests. Requests not compatible will fall back to normal S3 requests.
|
||||||
|
//
|
||||||
|
// The bucket must be enable for accelerate to be used with S3 client with
|
||||||
|
// accelerate enabled. If the bucket is not enabled for accelerate an error
|
||||||
|
// will be returned. The bucket name must be DNS compatible to also work
|
||||||
|
// with accelerate.
|
||||||
|
//
|
||||||
|
// Not compatible with UseDualStack requests will fail if both flags are
|
||||||
|
// specified.
|
||||||
|
S3UseAccelerate *bool
|
||||||
|
|
||||||
|
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||||
|
// default http.Client's Timeout. This is helpful if you do not want the
|
||||||
|
// EC2Metadata client to create a new http.Client. This options is only
|
||||||
|
// meaningful if you're not already using a custom HTTP client with the
|
||||||
|
// SDK. Enabled by default.
|
||||||
|
//
|
||||||
|
// Must be set and provided to the session.NewSession() in order to disable
|
||||||
|
// the EC2Metadata overriding the timeout for default credentials chain.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
|
||||||
|
//
|
||||||
|
// svc := s3.New(sess)
|
||||||
|
//
|
||||||
|
EC2MetadataDisableTimeoutOverride *bool
|
||||||
|
|
||||||
|
// Instructs the endpiont to be generated for a service client to
|
||||||
|
// be the dual stack endpoint. The dual stack endpoint will support
|
||||||
|
// both IPv4 and IPv6 addressing.
|
||||||
|
//
|
||||||
|
// Setting this for a service which does not support dual stack will fail
|
||||||
|
// to make requets. It is not recommended to set this value on the session
|
||||||
|
// as it will apply to all service clients created with the session. Even
|
||||||
|
// services which don't support dual stack endpoints.
|
||||||
|
//
|
||||||
|
// If the Endpoint config value is also provided the UseDualStack flag
|
||||||
|
// will be ignored.
|
||||||
|
//
|
||||||
|
// Only supported with.
|
||||||
|
//
|
||||||
|
// sess, err := session.NewSession()
|
||||||
|
//
|
||||||
|
// svc := s3.New(sess, &aws.Config{
|
||||||
|
// UseDualStack: aws.Bool(true),
|
||||||
|
// })
|
||||||
|
UseDualStack *bool
|
||||||
|
|
||||||
|
// SleepDelay is an override for the func the SDK will call when sleeping
|
||||||
|
// during the lifecycle of a request. Specifically this will be used for
|
||||||
|
// request delays. This value should only be used for testing. To adjust
|
||||||
|
// the delay of a request see the aws/client.DefaultRetryer and
|
||||||
|
// aws/request.Retryer.
|
||||||
|
SleepDelay func(time.Duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfig returns a new Config pointer that can be chained with builder
|
||||||
|
// methods to set multiple configuration values inline without using pointers.
|
||||||
|
//
|
||||||
|
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||||
|
// // service clients.
|
||||||
|
// sess, err := session.NewSession(aws.NewConfig().
|
||||||
|
// WithMaxRetries(3),
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// // Create S3 service client with a specific Region.
|
||||||
|
// svc := s3.New(sess, aws.NewConfig().
|
||||||
|
// WithRegion("us-west-2"),
|
||||||
|
// )
|
||||||
|
func NewConfig() *Config {
|
||||||
|
return &Config{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
||||||
|
// a Config pointer.
|
||||||
|
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
||||||
|
c.CredentialsChainVerboseErrors = &verboseErrs
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCredentials sets a config Credentials value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
||||||
|
c.Credentials = creds
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||||
|
c.Endpoint = &endpoint
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRegion sets a config Region value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithRegion(region string) *Config {
|
||||||
|
c.Region = ®ion
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||||
|
c.DisableSSL = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
||||||
|
c.HTTPClient = client
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithMaxRetries(max int) *Config {
|
||||||
|
c.MaxRetries = &max
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||||
|
c.DisableParamValidation = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||||
|
c.DisableComputeChecksums = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||||
|
c.LogLevel = &level
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogger sets a config Logger value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithLogger(logger Logger) *Config {
|
||||||
|
c.Logger = logger
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||||
|
c.S3ForcePathStyle = &force
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
||||||
|
// a Config pointer for chaining.
|
||||||
|
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
||||||
|
c.S3Disable100Continue = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||||
|
c.S3UseAccelerate = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithUseDualStack(enable bool) *Config {
|
||||||
|
c.UseDualStack = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||||
|
c.EC2MetadataDisableTimeoutOverride = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSleepDelay overrides the function used to sleep while waiting for the
|
||||||
|
// next retry. Defaults to time.Sleep.
|
||||||
|
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
||||||
|
c.SleepDelay = fn
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeIn merges the passed in configs into the existing config object.
|
||||||
|
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||||
|
for _, other := range cfgs {
|
||||||
|
mergeInConfig(c, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeInConfig(dst *Config, other *Config) {
|
||||||
|
if other == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.CredentialsChainVerboseErrors != nil {
|
||||||
|
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Credentials != nil {
|
||||||
|
dst.Credentials = other.Credentials
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Endpoint != nil {
|
||||||
|
dst.Endpoint = other.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Region != nil {
|
||||||
|
dst.Region = other.Region
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableSSL != nil {
|
||||||
|
dst.DisableSSL = other.DisableSSL
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.HTTPClient != nil {
|
||||||
|
dst.HTTPClient = other.HTTPClient
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.LogLevel != nil {
|
||||||
|
dst.LogLevel = other.LogLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Logger != nil {
|
||||||
|
dst.Logger = other.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.MaxRetries != nil {
|
||||||
|
dst.MaxRetries = other.MaxRetries
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Retryer != nil {
|
||||||
|
dst.Retryer = other.Retryer
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableParamValidation != nil {
|
||||||
|
dst.DisableParamValidation = other.DisableParamValidation
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableComputeChecksums != nil {
|
||||||
|
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3ForcePathStyle != nil {
|
||||||
|
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3Disable100Continue != nil {
|
||||||
|
dst.S3Disable100Continue = other.S3Disable100Continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3UseAccelerate != nil {
|
||||||
|
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.UseDualStack != nil {
|
||||||
|
dst.UseDualStack = other.UseDualStack
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||||
|
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.SleepDelay != nil {
|
||||||
|
dst.SleepDelay = other.SleepDelay
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy will return a shallow copy of the Config object. If any additional
|
||||||
|
// configurations are provided they will be merged into the new config returned.
|
||||||
|
func (c *Config) Copy(cfgs ...*Config) *Config {
|
||||||
|
dst := &Config{}
|
||||||
|
dst.MergeIn(c)
|
||||||
|
|
||||||
|
for _, cfg := range cfgs {
|
||||||
|
dst.MergeIn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
369
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
generated
vendored
Normal file
369
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
generated
vendored
Normal file
|
@ -0,0 +1,369 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// String returns a pointer to the string value passed in.
|
||||||
|
func String(v string) *string {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringValue returns the value of the string pointer passed in or
|
||||||
|
// "" if the pointer is nil.
|
||||||
|
func StringValue(v *string) string {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringSlice converts a slice of string values into a slice of
|
||||||
|
// string pointers
|
||||||
|
func StringSlice(src []string) []*string {
|
||||||
|
dst := make([]*string, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringValueSlice converts a slice of string pointers into a slice of
|
||||||
|
// string values
|
||||||
|
func StringValueSlice(src []*string) []string {
|
||||||
|
dst := make([]string, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringMap converts a string map of string values into a string
|
||||||
|
// map of string pointers
|
||||||
|
func StringMap(src map[string]string) map[string]*string {
|
||||||
|
dst := make(map[string]*string)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringValueMap converts a string map of string pointers into a string
|
||||||
|
// map of string values
|
||||||
|
func StringValueMap(src map[string]*string) map[string]string {
|
||||||
|
dst := make(map[string]string)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bool returns a pointer to the bool value passed in.
|
||||||
|
func Bool(v bool) *bool {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolValue returns the value of the bool pointer passed in or
|
||||||
|
// false if the pointer is nil.
|
||||||
|
func BoolValue(v *bool) bool {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolSlice converts a slice of bool values into a slice of
|
||||||
|
// bool pointers
|
||||||
|
func BoolSlice(src []bool) []*bool {
|
||||||
|
dst := make([]*bool, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolValueSlice converts a slice of bool pointers into a slice of
|
||||||
|
// bool values
|
||||||
|
func BoolValueSlice(src []*bool) []bool {
|
||||||
|
dst := make([]bool, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolMap converts a string map of bool values into a string
|
||||||
|
// map of bool pointers
|
||||||
|
func BoolMap(src map[string]bool) map[string]*bool {
|
||||||
|
dst := make(map[string]*bool)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolValueMap converts a string map of bool pointers into a string
|
||||||
|
// map of bool values
|
||||||
|
func BoolValueMap(src map[string]*bool) map[string]bool {
|
||||||
|
dst := make(map[string]bool)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int returns a pointer to the int value passed in.
|
||||||
|
func Int(v int) *int {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntValue returns the value of the int pointer passed in or
|
||||||
|
// 0 if the pointer is nil.
|
||||||
|
func IntValue(v *int) int {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntSlice converts a slice of int values into a slice of
|
||||||
|
// int pointers
|
||||||
|
func IntSlice(src []int) []*int {
|
||||||
|
dst := make([]*int, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntValueSlice converts a slice of int pointers into a slice of
|
||||||
|
// int values
|
||||||
|
func IntValueSlice(src []*int) []int {
|
||||||
|
dst := make([]int, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntMap converts a string map of int values into a string
|
||||||
|
// map of int pointers
|
||||||
|
func IntMap(src map[string]int) map[string]*int {
|
||||||
|
dst := make(map[string]*int)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntValueMap converts a string map of int pointers into a string
|
||||||
|
// map of int values
|
||||||
|
func IntValueMap(src map[string]*int) map[string]int {
|
||||||
|
dst := make(map[string]int)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64 returns a pointer to the int64 value passed in.
|
||||||
|
func Int64(v int64) *int64 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64Value returns the value of the int64 pointer passed in or
|
||||||
|
// 0 if the pointer is nil.
|
||||||
|
func Int64Value(v *int64) int64 {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64Slice converts a slice of int64 values into a slice of
|
||||||
|
// int64 pointers
|
||||||
|
func Int64Slice(src []int64) []*int64 {
|
||||||
|
dst := make([]*int64, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64ValueSlice converts a slice of int64 pointers into a slice of
|
||||||
|
// int64 values
|
||||||
|
func Int64ValueSlice(src []*int64) []int64 {
|
||||||
|
dst := make([]int64, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64Map converts a string map of int64 values into a string
|
||||||
|
// map of int64 pointers
|
||||||
|
func Int64Map(src map[string]int64) map[string]*int64 {
|
||||||
|
dst := make(map[string]*int64)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64ValueMap converts a string map of int64 pointers into a string
|
||||||
|
// map of int64 values
|
||||||
|
func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
||||||
|
dst := make(map[string]int64)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 returns a pointer to the float64 value passed in.
|
||||||
|
func Float64(v float64) *float64 {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64Value returns the value of the float64 pointer passed in or
|
||||||
|
// 0 if the pointer is nil.
|
||||||
|
func Float64Value(v *float64) float64 {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64Slice converts a slice of float64 values into a slice of
|
||||||
|
// float64 pointers
|
||||||
|
func Float64Slice(src []float64) []*float64 {
|
||||||
|
dst := make([]*float64, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64ValueSlice converts a slice of float64 pointers into a slice of
|
||||||
|
// float64 values
|
||||||
|
func Float64ValueSlice(src []*float64) []float64 {
|
||||||
|
dst := make([]float64, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64Map converts a string map of float64 values into a string
|
||||||
|
// map of float64 pointers
|
||||||
|
func Float64Map(src map[string]float64) map[string]*float64 {
|
||||||
|
dst := make(map[string]*float64)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64ValueMap converts a string map of float64 pointers into a string
|
||||||
|
// map of float64 values
|
||||||
|
func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
||||||
|
dst := make(map[string]float64)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Time returns a pointer to the time.Time value passed in.
|
||||||
|
func Time(v time.Time) *time.Time {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValue returns the value of the time.Time pointer passed in or
|
||||||
|
// time.Time{} if the pointer is nil.
|
||||||
|
func TimeValue(v *time.Time) time.Time {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||||
|
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||||
|
// Which includes calling TimeUnixMilli on a zero Time is undefined.
|
||||||
|
//
|
||||||
|
// This utility is useful for service API's such as CloudWatch Logs which require
|
||||||
|
// their unix time values to be in milliseconds.
|
||||||
|
//
|
||||||
|
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
|
||||||
|
func TimeUnixMilli(t time.Time) int64 {
|
||||||
|
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeSlice converts a slice of time.Time values into a slice of
|
||||||
|
// time.Time pointers
|
||||||
|
func TimeSlice(src []time.Time) []*time.Time {
|
||||||
|
dst := make([]*time.Time, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||||
|
// time.Time values
|
||||||
|
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||||
|
dst := make([]time.Time, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeMap converts a string map of time.Time values into a string
|
||||||
|
// map of time.Time pointers
|
||||||
|
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||||
|
dst := make(map[string]*time.Time)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||||
|
// map of time.Time values
|
||||||
|
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||||
|
dst := make(map[string]time.Time)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
152
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
Normal file
152
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
package corehandlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Interface for matching types which also have a Len method.
|
||||||
|
type lener interface {
|
||||||
|
Len() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildContentLengthHandler builds the content length of a request based on the body,
|
||||||
|
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
|
||||||
|
// to determine request body length and no "Content-Length" was specified it will panic.
|
||||||
|
//
|
||||||
|
// The Content-Length will only be aded to the request if the length of the body
|
||||||
|
// is greater than 0. If the body is empty or the current `Content-Length`
|
||||||
|
// header is <= 0, the header will also be stripped.
|
||||||
|
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
|
||||||
|
var length int64
|
||||||
|
|
||||||
|
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||||
|
length, _ = strconv.ParseInt(slength, 10, 64)
|
||||||
|
} else {
|
||||||
|
switch body := r.Body.(type) {
|
||||||
|
case nil:
|
||||||
|
length = 0
|
||||||
|
case lener:
|
||||||
|
length = int64(body.Len())
|
||||||
|
case io.Seeker:
|
||||||
|
r.BodyStart, _ = body.Seek(0, 1)
|
||||||
|
end, _ := body.Seek(0, 2)
|
||||||
|
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
|
||||||
|
length = end - r.BodyStart
|
||||||
|
default:
|
||||||
|
panic("Cannot get length of body, must provide `ContentLength`")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if length > 0 {
|
||||||
|
r.HTTPRequest.ContentLength = length
|
||||||
|
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||||
|
} else {
|
||||||
|
r.HTTPRequest.ContentLength = 0
|
||||||
|
r.HTTPRequest.Header.Del("Content-Length")
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
|
||||||
|
var SDKVersionUserAgentHandler = request.NamedHandler{
|
||||||
|
Name: "core.SDKVersionUserAgentHandler",
|
||||||
|
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
|
||||||
|
runtime.Version(), runtime.GOOS, runtime.GOARCH),
|
||||||
|
}
|
||||||
|
|
||||||
|
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
||||||
|
|
||||||
|
// SendHandler is a request handler to send service request using HTTP client.
|
||||||
|
var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
|
||||||
|
var err error
|
||||||
|
r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
|
||||||
|
if err != nil {
|
||||||
|
// Prevent leaking if an HTTPResponse was returned. Clean up
|
||||||
|
// the body.
|
||||||
|
if r.HTTPResponse != nil {
|
||||||
|
r.HTTPResponse.Body.Close()
|
||||||
|
}
|
||||||
|
// Capture the case where url.Error is returned for error processing
|
||||||
|
// response. e.g. 301 without location header comes back as string
|
||||||
|
// error and r.HTTPResponse is nil. Other url redirect errors will
|
||||||
|
// comeback in a similar method.
|
||||||
|
if e, ok := err.(*url.Error); ok && e.Err != nil {
|
||||||
|
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
|
||||||
|
code, _ := strconv.ParseInt(s[1], 10, 64)
|
||||||
|
r.HTTPResponse = &http.Response{
|
||||||
|
StatusCode: int(code),
|
||||||
|
Status: http.StatusText(int(code)),
|
||||||
|
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if r.HTTPResponse == nil {
|
||||||
|
// Add a dummy request response object to ensure the HTTPResponse
|
||||||
|
// value is consistent.
|
||||||
|
r.HTTPResponse = &http.Response{
|
||||||
|
StatusCode: int(0),
|
||||||
|
Status: http.StatusText(int(0)),
|
||||||
|
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Catch all other request errors.
|
||||||
|
r.Error = awserr.New("RequestError", "send request failed", err)
|
||||||
|
r.Retryable = aws.Bool(true) // network errors are retryable
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// ValidateResponseHandler is a request handler to validate service response.
|
||||||
|
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
|
||||||
|
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
||||||
|
// this may be replaced by an UnmarshalError handler
|
||||||
|
r.Error = awserr.New("UnknownError", "unknown error", nil)
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// AfterRetryHandler performs final checks to determine if the request should
|
||||||
|
// be retried and how long to delay.
|
||||||
|
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
|
||||||
|
// If one of the other handlers already set the retry state
|
||||||
|
// we don't want to override it based on the service's state
|
||||||
|
if r.Retryable == nil {
|
||||||
|
r.Retryable = aws.Bool(r.ShouldRetry(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.WillRetry() {
|
||||||
|
r.RetryDelay = r.RetryRules(r)
|
||||||
|
r.Config.SleepDelay(r.RetryDelay)
|
||||||
|
|
||||||
|
// when the expired token exception occurs the credentials
|
||||||
|
// need to be expired locally so that the next request to
|
||||||
|
// get credentials will trigger a credentials refresh.
|
||||||
|
if r.IsErrorExpired() {
|
||||||
|
r.Config.Credentials.Expire()
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RetryCount++
|
||||||
|
r.Error = nil
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// ValidateEndpointHandler is a request handler to validate a request had the
|
||||||
|
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
|
||||||
|
// region is not valid.
|
||||||
|
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
|
||||||
|
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
|
||||||
|
r.Error = aws.ErrMissingRegion
|
||||||
|
} else if r.ClientInfo.Endpoint == "" {
|
||||||
|
r.Error = aws.ErrMissingEndpoint
|
||||||
|
}
|
||||||
|
}}
|
17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
generated
vendored
Normal file
17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package corehandlers
|
||||||
|
|
||||||
|
import "github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
|
||||||
|
// ValidateParametersHandler is a request handler to validate the input parameters.
|
||||||
|
// Validating parameters only has meaning if done prior to the request being sent.
|
||||||
|
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
|
||||||
|
if !r.ParamsFilled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := r.Params.(request.Validator); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
r.Error = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}}
|
100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNoValidProvidersFoundInChain Is returned when there are no valid
|
||||||
|
// providers in the ChainProvider.
|
||||||
|
//
|
||||||
|
// This has been deprecated. For verbose error messaging set
|
||||||
|
// aws.Config.CredentialsChainVerboseErrors to true
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
|
||||||
|
`no valid providers in chain. Deprecated.
|
||||||
|
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
|
||||||
|
nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A ChainProvider will search for a provider which returns credentials
|
||||||
|
// and cache that provider until Retrieve is called again.
|
||||||
|
//
|
||||||
|
// The ChainProvider provides a way of chaining multiple providers together
|
||||||
|
// which will pick the first available using priority order of the Providers
|
||||||
|
// in the list.
|
||||||
|
//
|
||||||
|
// If none of the Providers retrieve valid credentials Value, ChainProvider's
|
||||||
|
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
|
||||||
|
//
|
||||||
|
// If a Provider is found which returns valid credentials Value ChainProvider
|
||||||
|
// will cache that Provider for all calls to IsExpired(), until Retrieve is
|
||||||
|
// called again.
|
||||||
|
//
|
||||||
|
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
|
||||||
|
// In this example EnvProvider will first check if any credentials are available
|
||||||
|
// vai the environment variables. If there are none ChainProvider will check
|
||||||
|
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
|
||||||
|
// does not return any credentials ChainProvider will return the error
|
||||||
|
// ErrNoValidProvidersFoundInChain
|
||||||
|
//
|
||||||
|
// creds := NewChainCredentials(
|
||||||
|
// []Provider{
|
||||||
|
// &EnvProvider{},
|
||||||
|
// &EC2RoleProvider{
|
||||||
|
// Client: ec2metadata.New(sess),
|
||||||
|
// },
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Usage of ChainCredentials with aws.Config
|
||||||
|
// svc := ec2.New(&aws.Config{Credentials: creds})
|
||||||
|
//
|
||||||
|
type ChainProvider struct {
|
||||||
|
Providers []Provider
|
||||||
|
curr Provider
|
||||||
|
VerboseErrors bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChainCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping a chain of providers.
|
||||||
|
func NewChainCredentials(providers []Provider) *Credentials {
|
||||||
|
return NewCredentials(&ChainProvider{
|
||||||
|
Providers: append([]Provider{}, providers...),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve returns the credentials value or error if no provider returned
|
||||||
|
// without error.
|
||||||
|
//
|
||||||
|
// If a provider is found it will be cached and any calls to IsExpired()
|
||||||
|
// will return the expired state of the cached provider.
|
||||||
|
func (c *ChainProvider) Retrieve() (Value, error) {
|
||||||
|
var errs []error
|
||||||
|
for _, p := range c.Providers {
|
||||||
|
creds, err := p.Retrieve()
|
||||||
|
if err == nil {
|
||||||
|
c.curr = p
|
||||||
|
return creds, nil
|
||||||
|
}
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
c.curr = nil
|
||||||
|
|
||||||
|
var err error
|
||||||
|
err = ErrNoValidProvidersFoundInChain
|
||||||
|
if c.VerboseErrors {
|
||||||
|
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
|
||||||
|
}
|
||||||
|
return Value{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired will returned the expired state of the currently cached provider
|
||||||
|
// if there is one. If there is no current provider, true will be returned.
|
||||||
|
func (c *ChainProvider) IsExpired() bool {
|
||||||
|
if c.curr != nil {
|
||||||
|
return c.curr.IsExpired()
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
223
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
223
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
||||||
|
// Package credentials provides credential retrieval and management
|
||||||
|
//
|
||||||
|
// The Credentials is the primary method of getting access to and managing
|
||||||
|
// credentials Values. Using dependency injection retrieval of the credential
|
||||||
|
// values is handled by a object which satisfies the Provider interface.
|
||||||
|
//
|
||||||
|
// By default the Credentials.Get() will cache the successful result of a
|
||||||
|
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||||
|
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||||
|
//
|
||||||
|
// The Provider is responsible for determining when credentials Value have expired.
|
||||||
|
// It is also important to note that Credentials will always call Retrieve the
|
||||||
|
// first time Credentials.Get() is called.
|
||||||
|
//
|
||||||
|
// Example of using the environment variable credentials.
|
||||||
|
//
|
||||||
|
// creds := NewEnvCredentials()
|
||||||
|
//
|
||||||
|
// // Retrieve the credentials value
|
||||||
|
// credValue, err := creds.Get()
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||||
|
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||||
|
// than they would naturally expire on their own.
|
||||||
|
//
|
||||||
|
// creds := NewCredentials(&EC2RoleProvider{})
|
||||||
|
// creds.Expire()
|
||||||
|
// credsValue, err := creds.Get()
|
||||||
|
// // New credentials will be retrieved instead of from cache.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Custom Provider
|
||||||
|
//
|
||||||
|
// Each Provider built into this package also provides a helper method to generate
|
||||||
|
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||||
|
// create a type which satisfies the Provider interface and pass it to the
|
||||||
|
// NewCredentials method.
|
||||||
|
//
|
||||||
|
// type MyProvider struct{}
|
||||||
|
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||||
|
// func (m *MyProvider) IsExpired() bool {...}
|
||||||
|
//
|
||||||
|
// creds := NewCredentials(&MyProvider{})
|
||||||
|
// credValue, err := creds.Get()
|
||||||
|
//
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnonymousCredentials is an empty Credential object that can be used as
|
||||||
|
// dummy placeholder credentials for requests that do not need signed.
|
||||||
|
//
|
||||||
|
// This Credentials can be used to configure a service to not sign requests
|
||||||
|
// when making service API calls. For example, when accessing public
|
||||||
|
// s3 buckets.
|
||||||
|
//
|
||||||
|
// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
|
||||||
|
// // Access public S3 buckets.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
var AnonymousCredentials = NewStaticCredentials("", "", "")
|
||||||
|
|
||||||
|
// A Value is the AWS credentials value for individual credential fields.
|
||||||
|
type Value struct {
|
||||||
|
// AWS Access key ID
|
||||||
|
AccessKeyID string
|
||||||
|
|
||||||
|
// AWS Secret Access Key
|
||||||
|
SecretAccessKey string
|
||||||
|
|
||||||
|
// AWS Session Token
|
||||||
|
SessionToken string
|
||||||
|
|
||||||
|
// Provider used to get credentials
|
||||||
|
ProviderName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Provider is the interface for any component which will provide credentials
|
||||||
|
// Value. A provider is required to manage its own Expired state, and what to
|
||||||
|
// be expired means.
|
||||||
|
//
|
||||||
|
// The Provider should not need to implement its own mutexes, because
|
||||||
|
// that will be managed by Credentials.
|
||||||
|
type Provider interface {
|
||||||
|
// Refresh returns nil if it successfully retrieved the value.
|
||||||
|
// Error is returned if the value were not obtainable, or empty.
|
||||||
|
Retrieve() (Value, error)
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are no longer valid, and need
|
||||||
|
// to be retrieved.
|
||||||
|
IsExpired() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Expiry provides shared expiration logic to be used by credentials
|
||||||
|
// providers to implement expiry functionality.
|
||||||
|
//
|
||||||
|
// The best method to use this struct is as an anonymous field within the
|
||||||
|
// provider's struct.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// type EC2RoleProvider struct {
|
||||||
|
// Expiry
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
type Expiry struct {
|
||||||
|
// The date/time when to expire on
|
||||||
|
expiration time.Time
|
||||||
|
|
||||||
|
// If set will be used by IsExpired to determine the current time.
|
||||||
|
// Defaults to time.Now if CurrentTime is not set. Available for testing
|
||||||
|
// to be able to mock out the current time.
|
||||||
|
CurrentTime func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiration sets the expiration IsExpired will check when called.
|
||||||
|
//
|
||||||
|
// If window is greater than 0 the expiration time will be reduced by the
|
||||||
|
// window value.
|
||||||
|
//
|
||||||
|
// Using a window is helpful to trigger credentials to expire sooner than
|
||||||
|
// the expiration time given to ensure no requests are made with expired
|
||||||
|
// tokens.
|
||||||
|
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||||
|
e.expiration = expiration
|
||||||
|
if window > 0 {
|
||||||
|
e.expiration = e.expiration.Add(-window)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are expired.
|
||||||
|
func (e *Expiry) IsExpired() bool {
|
||||||
|
if e.CurrentTime == nil {
|
||||||
|
e.CurrentTime = time.Now
|
||||||
|
}
|
||||||
|
return e.expiration.Before(e.CurrentTime())
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
|
||||||
|
// Credentials will cache the credentials value until they expire. Once the value
|
||||||
|
// expires the next Get will attempt to retrieve valid credentials.
|
||||||
|
//
|
||||||
|
// Credentials is safe to use across multiple goroutines and will manage the
|
||||||
|
// synchronous state so the Providers do not need to implement their own
|
||||||
|
// synchronization.
|
||||||
|
//
|
||||||
|
// The first Credentials.Get() will always call Provider.Retrieve() to get the
|
||||||
|
// first instance of the credentials Value. All calls to Get() after that
|
||||||
|
// will return the cached credentials Value until IsExpired() returns true.
|
||||||
|
type Credentials struct {
|
||||||
|
creds Value
|
||||||
|
forceRefresh bool
|
||||||
|
m sync.Mutex
|
||||||
|
|
||||||
|
provider Provider
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentials returns a pointer to a new Credentials with the provider set.
|
||||||
|
func NewCredentials(provider Provider) *Credentials {
|
||||||
|
return &Credentials{
|
||||||
|
provider: provider,
|
||||||
|
forceRefresh: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the credentials value, or error if the credentials Value failed
|
||||||
|
// to be retrieved.
|
||||||
|
//
|
||||||
|
// Will return the cached credentials Value if it has not expired. If the
|
||||||
|
// credentials Value has expired the Provider's Retrieve() will be called
|
||||||
|
// to refresh the credentials.
|
||||||
|
//
|
||||||
|
// If Credentials.Expire() was called the credentials Value will be force
|
||||||
|
// expired, and the next call to Get() will cause them to be refreshed.
|
||||||
|
func (c *Credentials) Get() (Value, error) {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
if c.isExpired() {
|
||||||
|
creds, err := c.provider.Retrieve()
|
||||||
|
if err != nil {
|
||||||
|
return Value{}, err
|
||||||
|
}
|
||||||
|
c.creds = creds
|
||||||
|
c.forceRefresh = false
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.creds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expire expires the credentials and forces them to be retrieved on the
|
||||||
|
// next call to Get().
|
||||||
|
//
|
||||||
|
// This will override the Provider's expired state, and force Credentials
|
||||||
|
// to call the Provider's Retrieve().
|
||||||
|
func (c *Credentials) Expire() {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
c.forceRefresh = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are no longer valid, and need
|
||||||
|
// to be retrieved.
|
||||||
|
//
|
||||||
|
// If the Credentials were forced to be expired with Expire() this will
|
||||||
|
// reflect that override.
|
||||||
|
func (c *Credentials) IsExpired() bool {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
return c.isExpired()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isExpired helper method wrapping the definition of expired credentials.
|
||||||
|
func (c *Credentials) isExpired() bool {
|
||||||
|
return c.forceRefresh || c.provider.IsExpired()
|
||||||
|
}
|
178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
Normal file
178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
package ec2rolecreds
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProviderName provides a name of EC2Role provider
const ProviderName = "EC2RoleProvider"

// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
// or ExpiryWindow
//
//     p := &ec2rolecreds.EC2RoleProvider{
//         // Pass in a custom timeout to be used when requesting
//         // IAM EC2 Role credentials.
//         Client: ec2metadata.New(sess, aws.Config{
//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
//         }),
//
//         // Do not use early expiry of credentials. If a non zero value is
//         // specified the credentials will be expired early
//         ExpiryWindow: 0,
//     }
type EC2RoleProvider struct {
	credentials.Expiry

	// Required EC2Metadata client to use when connecting to EC2 metadata service.
	Client *ec2metadata.EC2Metadata

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}

// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
	p := &EC2RoleProvider{
		Client: ec2metadata.New(c),
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
// metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
	p := &EC2RoleProvider{
		Client: client,
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
	credsList, err := requestCredList(m.Client)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	if len(credsList) == 0 {
		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
	}
	// Only the first role name returned by the metadata service is used.
	credsName := credsList[0]

	roleCreds, err := requestCred(m.Client, credsName)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// Record the expiration so the credentials chain refreshes this provider
	// ExpiryWindow before the credentials actually expire.
	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken:    roleCreds.Token,
		ProviderName:    ProviderName,
	}, nil
}

// A ec2RoleCredRespBody provides the shape for unmarshalling credential
// request responses.
type ec2RoleCredRespBody struct {
	// Success State
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string

	// Error state
	Code    string
	Message string
}

// iamSecurityCredsPath is the metadata-service path listing the instance's
// IAM role credentials.
const iamSecurityCredsPath = "/iam/security-credentials"

// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the
// request, an error will be returned.
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
	resp, err := client.GetMetadata(iamSecurityCredsPath)
	if err != nil {
		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
	}

	// The metadata service returns one role name per line.
	credsList := []string{}
	s := bufio.NewScanner(strings.NewReader(resp))
	for s.Scan() {
		credsList = append(credsList, s.Text())
	}

	if err := s.Err(); err != nil {
		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
	}

	return credsList, nil
}

// requestCred requests the credentials for a specific credentials from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
	if err != nil {
		return ec2RoleCredRespBody{},
			awserr.New("EC2RoleRequestError",
				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
				err)
	}

	respCreds := ec2RoleCredRespBody{}
	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{},
			awserr.New("SerializationError",
				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
				err)
	}

	// Success responses carry no Code field in the common case, but the
	// metadata service reports errors in-band via Code/Message.
	if respCreds.Code != "Success" {
		// If an error code was returned something failed requesting the role.
		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
	}

	return respCreds, nil
}
|
191
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
generated
vendored
Normal file
191
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
||||||
|
// Package endpointcreds provides support for retrieving credentials from an
// arbitrary HTTP endpoint.
//
// The credentials endpoint Provider can receive both static and refreshable
// credentials that will expire. Credentials are static when an "Expiration"
// value is not provided in the endpoint's response.
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
//    {
//        "AccessKeyId" : "MUA...",
//        "SecretAccessKey" : "/7PC5om....",
//    }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
//    {
//        "AccessKeyId" : "MUA...",
//        "SecretAccessKey" : "/7PC5om....",
//        "Token" : "AQoDY....=",
//        "Expiration" : "2016-02-25T06:03:31Z"
//    }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
//    {
//        "code": "ErrorCode",
//        "message": "Helpful error message."
//    }
package endpointcreds

import (
	"encoding/json"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
)

// ProviderName is the name of the credentials provider.
const ProviderName = `CredentialsEndpointProvider`

// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
	// staticCreds is set when the endpoint response had no Expiration value;
	// such credentials are treated as never expiring.
	staticCreds bool
	credentials.Expiry

	// Requires a AWS Client to make HTTP requests to the endpoint with.
	// the Endpoint the request will be made to is provided by the aws.Config's
	// Endpoint value.
	Client *client.Client

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}

// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
	p := &Provider{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: "CredentialsEndpoint",
				Endpoint:    endpoint,
			},
			handlers,
		),
	}

	// Replace the default marshaling/validation pipeline with JSON handlers
	// specific to the credentials-endpoint response format.
	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
	p.Client.Handlers.Validate.Clear()
	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)

	for _, option := range options {
		option(p)
	}

	return p
}

// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
// credentials from the endpoint the wrapped Provider was configured with.
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}

// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *Provider) IsExpired() bool {
	if p.staticCreds {
		return false
	}
	return p.Expiry.IsExpired()
}

// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. And error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
	resp, err := p.getCredentials()
	if err != nil {
		return credentials.Value{ProviderName: ProviderName},
			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
	}

	// A missing Expiration marks the credentials as static (never expiring).
	if resp.Expiration != nil {
		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
	} else {
		p.staticCreds = true
	}

	return credentials.Value{
		AccessKeyID:     resp.AccessKeyID,
		SecretAccessKey: resp.SecretAccessKey,
		SessionToken:    resp.Token,
		ProviderName:    ProviderName,
	}, nil
}

// getCredentialsOutput is the shape of a successful endpoint response.
type getCredentialsOutput struct {
	Expiration      *time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string
}

// errorOutput is the shape of an error response from the endpoint.
type errorOutput struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

// getCredentials performs the GET request against the configured endpoint and
// decodes the response into a getCredentialsOutput via the unmarshal handlers.
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
	op := &request.Operation{
		Name:       "GetCredentials",
		HTTPMethod: "GET",
	}

	out := &getCredentialsOutput{}
	req := p.Client.NewRequest(op, nil, out)
	req.HTTPRequest.Header.Set("Accept", "application/json")

	return out, req.Send()
}

// validateEndpointHandler fails the request early when no endpoint was
// configured on the client.
func validateEndpointHandler(r *request.Request) {
	if len(r.ClientInfo.Endpoint) == 0 {
		r.Error = aws.ErrMissingEndpoint
	}
}

// unmarshalHandler decodes a successful JSON response body into the request's
// Data (*getCredentialsOutput).
func unmarshalHandler(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	// NOTE(review): out is already a *getCredentialsOutput, so Decode(&out)
	// decodes through an extra level of indirection; encoding/json supports
	// this, filling the pointed-to struct.
	out := r.Data.(*getCredentialsOutput)
	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
		r.Error = awserr.New("SerializationError",
			"failed to decode endpoint credentials",
			err,
		)
	}
}

// unmarshalError decodes an error response body into an awserr.Error carried
// on the request.
func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	var errOut errorOutput
	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
		r.Error = awserr.New("SerializationError",
			"failed to decode endpoint credentials",
			err,
		)
	}

	// Response body format is not consistent between metadata endpoints.
	// Grab the error message as a string and include that as the source error
	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}
|
77
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
77
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
package credentials

import (
	"os"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// EnvProviderName provides a name of Env provider
const EnvProviderName = "EnvProvider"

var (
	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
	// found in the process's environment.
	//
	// @readonly
	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)

	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
	// can't be found in the process's environment.
	//
	// @readonly
	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)

// A EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
	// retrieved reports whether Retrieve has completed successfully.
	retrieved bool
}

// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
	return NewCredentials(&EnvProvider{})
}

// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
	e.retrieved = false

	// Each component may be supplied under either of two environment
	// variable names; the first one set to a non-empty value wins.
	id := firstEnv("AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY")
	secret := firstEnv("AWS_SECRET_ACCESS_KEY", "AWS_SECRET_KEY")

	switch {
	case id == "":
		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
	case secret == "":
		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
	}

	e.retrieved = true
	return Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
		ProviderName:    EnvProviderName,
	}, nil
}

// IsExpired returns if the credentials have been retrieved.
func (e *EnvProvider) IsExpired() bool {
	return !e.retrieved
}

// firstEnv returns the value of the first listed environment variable set to
// a non-empty string, or "" when none are set.
func firstEnv(names ...string) string {
	for _, name := range names {
		if v := os.Getenv(name); v != "" {
			return v
		}
	}
	return ""
}
|
12
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
12
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
[default]
|
||||||
|
aws_access_key_id = accessKey
|
||||||
|
aws_secret_access_key = secret
|
||||||
|
aws_session_token = token
|
||||||
|
|
||||||
|
[no_token]
|
||||||
|
aws_access_key_id = accessKey
|
||||||
|
aws_secret_access_key = secret
|
||||||
|
|
||||||
|
[with_colon]
|
||||||
|
aws_access_key_id: accessKey
|
||||||
|
aws_secret_access_key: secret
|
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,151 @@
|
||||||
|
package credentials

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/go-ini/ini"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// SharedCredsProviderName provides a name of SharedCreds provider
const SharedCredsProviderName = "SharedCredentialsProvider"

var (
	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
	//
	// @readonly
	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)

// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.aws/credentials"
	// Windows:   "%USERPROFILE%\.aws\credentials"
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}

// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
	return NewCredentials(&SharedCredentialsProvider{
		Filename: filename,
		Profile:  profile,
	})
}

// Retrieve reads and extracts the shared credentials from the current
// users home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
	p.retrieved = false

	filename, err := p.filename()
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, err
	}

	creds, err := loadProfile(filename, p.profile())
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, err
	}

	p.retrieved = true
	return creds, nil
}

// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
	return !p.retrieved
}

// loadProfile loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
	config, err := ini.Load(filename)
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
	}
	iniProfile, err := config.GetSection(profile)
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
	}

	id, err := iniProfile.GetKey("aws_access_key_id")
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
			err)
	}

	secret, err := iniProfile.GetKey("aws_secret_access_key")
	if err != nil {
		// BUGFIX: previously passed nil here, dropping the underlying ini
		// error; wrap it like the aws_access_key_id path above.
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
			err)
	}

	// Default to empty string if not found
	token := iniProfile.Key("aws_session_token")

	return Value{
		AccessKeyID:     id.String(),
		SecretAccessKey: secret.String(),
		SessionToken:    token.String(),
		ProviderName:    SharedCredsProviderName,
	}, nil
}

// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
	if p.Filename == "" {
		// Environment variable takes precedence over the home-directory default.
		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
			return p.Filename, nil
		}

		homeDir := os.Getenv("HOME") // *nix
		if homeDir == "" {           // Windows
			homeDir = os.Getenv("USERPROFILE")
		}
		if homeDir == "" {
			return "", ErrSharedCredentialsHomeNotFound
		}

		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
	}

	return p.Filename, nil
}

// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
	if p.Profile == "" {
		p.Profile = os.Getenv("AWS_PROFILE")
	}
	if p.Profile == "" {
		p.Profile = "default"
	}

	return p.Profile
}
|
57
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
57
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StaticProviderName provides a name of Static provider
|
||||||
|
const StaticProviderName = "StaticProvider"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A StaticProvider is a set of credentials which are set programmatically,
|
||||||
|
// and will never expire.
|
||||||
|
type StaticProvider struct {
|
||||||
|
Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping a static credentials value provider.
|
||||||
|
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||||
|
return NewCredentials(&StaticProvider{Value: Value{
|
||||||
|
AccessKeyID: id,
|
||||||
|
SecretAccessKey: secret,
|
||||||
|
SessionToken: token,
|
||||||
|
}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
|
||||||
|
// wrapping the static credentials value provide. Same as NewStaticCredentials
|
||||||
|
// but takes the creds Value instead of individual fields
|
||||||
|
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
|
||||||
|
return NewCredentials(&StaticProvider{Value: creds})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve returns the credentials or error if the credentials are invalid.
|
||||||
|
func (s *StaticProvider) Retrieve() (Value, error) {
|
||||||
|
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
||||||
|
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(s.Value.ProviderName) == 0 {
|
||||||
|
s.Value.ProviderName = StaticProviderName
|
||||||
|
}
|
||||||
|
return s.Value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are expired.
|
||||||
|
//
|
||||||
|
// For StaticProvider, the credentials never expired.
|
||||||
|
func (s *StaticProvider) IsExpired() bool {
|
||||||
|
return false
|
||||||
|
}
|
161
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
161
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
||||||
|
// Package stscreds are credential Providers to retrieve STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/service/sts"
)

// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"

// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}

// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute

// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
	credentials.Expiry

	// STS client to make assume role request with.
	Client AssumeRoler

	// Role to be assumed.
	RoleARN string

	// Session name, if you wish to reuse the credentials elsewhere.
	RoleSessionName string

	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
	Duration time.Duration

	// Optional ExternalID to pass along, defaults to nil if not set.
	ExternalID *string

	// The policy plain text must be 2048 bytes or shorter. However, an internal
	// conversion compresses it into a packed binary format with a separate limit.
	// The PackedPolicySize response element indicates by percentage how close to
	// the upper size limit the policy is, with 100% equaling the maximum allowed
	// size.
	Policy *string

	// The identification number of the MFA device that is associated with the user
	// who is making the AssumeRole call. Specify this value if the trust policy
	// of the role being assumed includes a condition that requires MFA authentication.
	// The value is either the serial number for a hardware device (such as GAHT12345678)
	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
	SerialNumber *string

	// The value provided by the MFA device, if the trust policy of the role being
	// assumed requires MFA (that is, if the policy includes a condition that tests
	// for MFA). If the role being assumed requires MFA and if the TokenCode value
	// is missing or expired, the AssumeRole call returns an "access denied" error.
	TokenCode *string

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}

// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
	p := &AssumeRoleProvider{
		Client:   sts.New(c),
		RoleARN:  roleARN,
		Duration: DefaultDuration,
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
	p := &AssumeRoleProvider{
		Client:   svc,
		RoleARN:  roleARN,
		Duration: DefaultDuration,
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {

	// Apply defaults where parameters are not set.
	if p.RoleSessionName == "" {
		// Try to work out a role name that will hopefully end up unique.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as AWS permits.
		p.Duration = DefaultDuration
	}
	input := &sts.AssumeRoleInput{
		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
		RoleArn:         aws.String(p.RoleARN),
		RoleSessionName: aws.String(p.RoleSessionName),
		ExternalId:      p.ExternalID,
	}
	if p.Policy != nil {
		input.Policy = p.Policy
	}
	// MFA parameters are only sent when both serial number and token code
	// are provided.
	if p.SerialNumber != nil && p.TokenCode != nil {
		input.SerialNumber = p.SerialNumber
		input.TokenCode = p.TokenCode
	}
	roleOutput, err := p.Client.AssumeRole(input)

	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// We will proactively generate new credentials before they expire.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
		ProviderName:    ProviderName,
	}, nil
}
|
129
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
Normal file
129
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
Normal file
|
@ -0,0 +1,129 @@
|
||||||
|
// Package defaults is a collection of helpers to retrieve the SDK's default
// configuration and handlers.
//
// Generally this package shouldn't be used directly, but session.Session
// instead. This package is useful when you need to reset the defaults
// of a session or service client to the SDK defaults before setting
// additional parameters.
package defaults

import (
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/endpoints"
)

// A Defaults provides a collection of default values for SDK clients.
type Defaults struct {
	Config   *aws.Config
	Handlers request.Handlers
}

// Get returns the SDK's default values with Config and handlers pre-configured.
func Get() Defaults {
	cfg := Config()
	h := Handlers()
	// The credential chain needs both the config and handlers, so it is
	// wired up last.
	cfg.Credentials = CredChain(cfg, h)

	return Defaults{
		Config:   cfg,
		Handlers: h,
	}
}

// Config returns the default configuration without credentials.
// To retrieve a config with credentials also included use
// `defaults.Get().Config` instead.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the configuration of an
// existing service client or session.
func Config() *aws.Config {
	return aws.NewConfig().
		WithCredentials(credentials.AnonymousCredentials).
		WithRegion(os.Getenv("AWS_REGION")).
		WithHTTPClient(http.DefaultClient).
		WithMaxRetries(aws.UseServiceDefaultRetries).
		WithLogger(aws.NewDefaultLogger()).
		WithLogLevel(aws.LogOff).
		WithSleepDelay(time.Sleep)
}

// Handlers returns the default request handlers.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the request handlers of an
// existing service client or session.
func Handlers() request.Handlers {
	var h request.Handlers

	h.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	h.Validate.AfterEachFn = request.HandlerListStopOnError
	h.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
	h.Build.AfterEachFn = request.HandlerListStopOnError
	h.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	h.Send.PushBackNamed(corehandlers.SendHandler)
	h.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
	h.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)

	return h
}

// CredChain returns the default credential chain.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
	// Order matters: environment first, then shared file, then remote
	// (ECS task role or EC2 instance role).
	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers: []credentials.Provider{
			&credentials.EnvProvider{},
			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
			RemoteCredProvider(*cfg, handlers),
		},
	})
}

// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
	// The presence of this env var means we are running inside an ECS task.
	if uri := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(uri) > 0 {
		return ecsCredProvider(cfg, handlers, uri)
	}

	return ec2RoleProvider(cfg, handlers)
}

// ecsCredProvider builds a provider that fetches credentials from the ECS
// task-role endpoint at the fixed link-local address.
func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
	const host = `169.254.170.2`

	return endpointcreds.NewProviderClient(cfg, handlers,
		fmt.Sprintf("http://%s%s", host, uri),
		func(p *endpointcreds.Provider) {
			p.ExpiryWindow = 5 * time.Minute
		},
	)
}

// ec2RoleProvider builds a provider that fetches credentials from the EC2
// instance metadata service.
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
		aws.StringValue(cfg.Region), true, false)

	return &ec2rolecreds.EC2RoleProvider{
		Client:       ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
		ExpiryWindow: 5 * time.Minute,
	}
}
|
140
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
Normal file
140
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
package ec2metadata

import (
	"encoding/json"
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetMetadata",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "meta-data", p),
	}

	output := &metadataOutput{}
	req := c.NewRequest(op, nil, output)

	return output.Content, req.Send()
}

// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetDynamicData",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "dynamic", p),
	}

	output := &metadataOutput{}
	req := c.NewRequest(op, nil, output)

	return output.Content, req.Send()
}

// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
	resp, err := c.GetDynamicData("instance-identity/document")
	if err != nil {
		return EC2InstanceIdentityDocument{},
			awserr.New("EC2MetadataRequestError",
				"failed to get EC2 instance identity document", err)
	}

	doc := EC2InstanceIdentityDocument{}
	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
		return EC2InstanceIdentityDocument{},
			awserr.New("SerializationError",
				"failed to decode EC2 instance identity document", err)
	}

	return doc, nil
}

// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
	resp, err := c.GetMetadata("iam/info")
	if err != nil {
		return EC2IAMInfo{},
			awserr.New("EC2MetadataRequestError",
				"failed to get EC2 IAM info", err)
	}

	info := EC2IAMInfo{}
	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
		return EC2IAMInfo{},
			awserr.New("SerializationError",
				"failed to decode EC2 IAM info", err)
	}

	if info.Code != "Success" {
		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
		return EC2IAMInfo{},
			awserr.New("EC2MetadataError", errMsg, nil)
	}

	return info, nil
}

// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
	resp, err := c.GetMetadata("placement/availability-zone")
	if err != nil {
		return "", err
	}

	// Guard against an empty body: the slice expression below would
	// panic with out-of-range on an empty string.
	if len(resp) == 0 {
		return "", awserr.New("EC2MetadataError",
			"invalid (empty) availability zone response", nil)
	}

	// returns region without the suffix. Eg: us-west-2a becomes us-west-2
	return resp[:len(resp)-1], nil
}

// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
	if _, err := c.GetMetadata("instance-id"); err != nil {
		return false
	}

	return true
}

// An EC2IAMInfo provides the shape for unmarshalling
// an IAM info from the metadata API
type EC2IAMInfo struct {
	Code               string
	LastUpdated        time.Time
	InstanceProfileArn string
	InstanceProfileID  string
}

// An EC2InstanceIdentityDocument provides the shape for unmarshalling
// an instance identity document
type EC2InstanceIdentityDocument struct {
	DevpayProductCodes []string  `json:"devpayProductCodes"`
	AvailabilityZone   string    `json:"availabilityZone"`
	PrivateIP          string    `json:"privateIp"`
	Version            string    `json:"version"`
	Region             string    `json:"region"`
	InstanceID         string    `json:"instanceId"`
	BillingProducts    []string  `json:"billingProducts"`
	InstanceType       string    `json:"instanceType"`
	AccountID          string    `json:"accountId"`
	PendingTime        time.Time `json:"pendingTime"`
	ImageID            string    `json:"imageId"`
	KernelID           string    `json:"kernelId"`
	RamdiskID          string    `json:"ramdiskId"`
	Architecture       string    `json:"architecture"`
}
|
124
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
Normal file
124
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
package ec2metadata

import (
	"bytes"
	"errors"
	"io"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
)

// ServiceName is the name of the service.
const ServiceName = "ec2metadata"

// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
	*client.Client
}

// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
//
// Example:
//     // Create a EC2Metadata client from just a session.
//     svc := ec2metadata.New(mySession)
//
//     // Create a EC2Metadata client with additional configuration
//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
	c := p.ClientConfig(ServiceName, cfgs...)
	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}

// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// If an unmodified HTTP client is provided from the stdlib default, or no client
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
		// If the http client is unmodified and this feature is not disabled
		// set custom timeouts for EC2Metadata requests.
		cfg.HTTPClient = &http.Client{
			// use a shorter timeout than default because the metadata
			// service is local if it is running, and to fail faster
			// if not running on an ec2 instance.
			Timeout: 5 * time.Second,
		}
	}

	svc := &EC2Metadata{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: ServiceName,
				Endpoint:    endpoint,
				APIVersion:  "latest",
			},
			handlers,
		),
	}

	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
	svc.Handlers.Validate.Clear()
	svc.Handlers.Validate.PushBack(validateEndpointHandler)

	// Add additional options to the service config
	for _, option := range opts {
		option(svc.Client)
	}

	return svc
}

// httpClientZero reports whether c is nil or indistinguishable from a
// freshly zero-valued http.Client (i.e. the user has not customized it).
func httpClientZero(c *http.Client) bool {
	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
}

type metadataOutput struct {
	Content string
}

// unmarshalHandler copies the raw response body into the request's
// metadataOutput as a string.
func unmarshalHandler(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	b := &bytes.Buffer{}
	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
		return
	}

	if data, ok := r.Data.(*metadataOutput); ok {
		data.Content = b.String()
	}
}

// unmarshalError converts a non-2xx response body into an awserr.Error.
func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	b := &bytes.Buffer{}
	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
		return
	}

	// Response body format is not consistent between metadata endpoints.
	// Grab the error message as a string and include that as the source error
	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
}

// validateEndpointHandler fails the request early when no endpoint is set.
func validateEndpointHandler(r *request.Request) {
	if r.ClientInfo.Endpoint == "" {
		r.Error = aws.ErrMissingEndpoint
	}
}
|
17
vendor/github.com/aws/aws-sdk-go/aws/errors.go
generated
vendored
Normal file
17
vendor/github.com/aws/aws-sdk-go/aws/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

var (
	// ErrMissingRegion is an error that is returned if region configuration is
	// not found.
	//
	// @readonly
	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
	// resolved for a service.
	//
	// @readonly
	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
|
112
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
Normal file
112
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
Normal file
|
@ -0,0 +1,112 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to workaround
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
	return &l
}

// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
	if l == nil {
		return LogOff
	}
	return *l
}

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
	return l.Value()&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
	return l.Value() >= v
}

const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be used to disable all logging.
	LogOff LogLevelType = iota * 0x1000

	// LogDebug states that debug output should be logged by the SDK. This should
	// be used to inspect requests made and responses received.
	LogDebug
)

// Debug Logging Sub Levels
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodies in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK.
	// Will also enable LogDebug.
	LogDebugWithHTTPBody

	// LogDebugWithRequestRetries states the SDK should log when service requests will
	// be retried. This should be used to log when you want to log when service
	// requests are being retried. Will also enable LogDebug.
	LogDebugWithRequestRetries

	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
//         fmt.Fprintln(os.Stdout, args...)
//     })})
type LoggerFunc func(...interface{})

// Log calls the wrapped function with the arguments provided
func (f LoggerFunc) Log(args ...interface{}) {
	f(args...)
}

// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use same formatting runes as the stdlib log.Logger
func NewDefaultLogger() Logger {
	return &defaultLogger{logger: log.New(os.Stdout, "", log.LstdFlags)}
}

// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
	logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
|
187
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
Normal file
187
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Handlers provides a collection of request handlers for various
|
||||||
|
// stages of handling requests.
|
||||||
|
type Handlers struct {
|
||||||
|
Validate HandlerList
|
||||||
|
Build HandlerList
|
||||||
|
Sign HandlerList
|
||||||
|
Send HandlerList
|
||||||
|
ValidateResponse HandlerList
|
||||||
|
Unmarshal HandlerList
|
||||||
|
UnmarshalMeta HandlerList
|
||||||
|
UnmarshalError HandlerList
|
||||||
|
Retry HandlerList
|
||||||
|
AfterRetry HandlerList
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy returns of this handler's lists.
|
||||||
|
func (h *Handlers) Copy() Handlers {
|
||||||
|
return Handlers{
|
||||||
|
Validate: h.Validate.copy(),
|
||||||
|
Build: h.Build.copy(),
|
||||||
|
Sign: h.Sign.copy(),
|
||||||
|
Send: h.Send.copy(),
|
||||||
|
ValidateResponse: h.ValidateResponse.copy(),
|
||||||
|
Unmarshal: h.Unmarshal.copy(),
|
||||||
|
UnmarshalError: h.UnmarshalError.copy(),
|
||||||
|
UnmarshalMeta: h.UnmarshalMeta.copy(),
|
||||||
|
Retry: h.Retry.copy(),
|
||||||
|
AfterRetry: h.AfterRetry.copy(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear removes callback functions for all handlers
|
||||||
|
func (h *Handlers) Clear() {
|
||||||
|
h.Validate.Clear()
|
||||||
|
h.Build.Clear()
|
||||||
|
h.Send.Clear()
|
||||||
|
h.Sign.Clear()
|
||||||
|
h.Unmarshal.Clear()
|
||||||
|
h.UnmarshalMeta.Clear()
|
||||||
|
h.UnmarshalError.Clear()
|
||||||
|
h.ValidateResponse.Clear()
|
||||||
|
h.Retry.Clear()
|
||||||
|
h.AfterRetry.Clear()
|
||||||
|
}
|
||||||
|
|
||||||
|
// A HandlerListRunItem represents an entry in the HandlerList which
|
||||||
|
// is being run.
|
||||||
|
type HandlerListRunItem struct {
|
||||||
|
Index int
|
||||||
|
Handler NamedHandler
|
||||||
|
Request *Request
|
||||||
|
}
|
||||||
|
|
||||||
|
// A HandlerList manages zero or more handlers in a list.
|
||||||
|
type HandlerList struct {
|
||||||
|
list []NamedHandler
|
||||||
|
|
||||||
|
// Called after each request handler in the list is called. If set
|
||||||
|
// and the func returns true the HandlerList will continue to iterate
|
||||||
|
// over the request handlers. If false is returned the HandlerList
|
||||||
|
// will stop iterating.
|
||||||
|
//
|
||||||
|
// Should be used if extra logic to be performed between each handler
|
||||||
|
// in the list. This can be used to terminate a list's iteration
|
||||||
|
// based on a condition such as error like, HandlerListStopOnError.
|
||||||
|
// Or for logging like HandlerListLogItem.
|
||||||
|
AfterEachFn func(item HandlerListRunItem) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// A NamedHandler is a struct that contains a name and function callback.
|
||||||
|
type NamedHandler struct {
|
||||||
|
Name string
|
||||||
|
Fn func(*Request)
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy creates a copy of the handler list.
|
||||||
|
func (l *HandlerList) copy() HandlerList {
|
||||||
|
n := HandlerList{
|
||||||
|
AfterEachFn: l.AfterEachFn,
|
||||||
|
}
|
||||||
|
n.list = append([]NamedHandler{}, l.list...)
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the handler list.
|
||||||
|
func (l *HandlerList) Clear() {
|
||||||
|
l.list = []NamedHandler{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of handlers in the list.
|
||||||
|
func (l *HandlerList) Len() int {
|
||||||
|
return len(l.list)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushBack pushes handler f to the back of the handler list.
|
||||||
|
func (l *HandlerList) PushBack(f func(*Request)) {
|
||||||
|
l.list = append(l.list, NamedHandler{"__anonymous", f})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushFront pushes handler f to the front of the handler list.
|
||||||
|
func (l *HandlerList) PushFront(f func(*Request)) {
|
||||||
|
l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushBackNamed pushes named handler f to the back of the handler list.
|
||||||
|
func (l *HandlerList) PushBackNamed(n NamedHandler) {
|
||||||
|
l.list = append(l.list, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushFrontNamed pushes named handler f to the front of the handler list.
|
||||||
|
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
|
||||||
|
l.list = append([]NamedHandler{n}, l.list...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes a NamedHandler n
|
||||||
|
func (l *HandlerList) Remove(n NamedHandler) {
|
||||||
|
newlist := []NamedHandler{}
|
||||||
|
for _, m := range l.list {
|
||||||
|
if m.Name != n.Name {
|
||||||
|
newlist = append(newlist, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.list = newlist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes all handlers in the list with a given request object.
|
||||||
|
func (l *HandlerList) Run(r *Request) {
|
||||||
|
for i, h := range l.list {
|
||||||
|
h.Fn(r)
|
||||||
|
item := HandlerListRunItem{
|
||||||
|
Index: i, Handler: h, Request: r,
|
||||||
|
}
|
||||||
|
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerListLogItem logs the request handler and the state of the
|
||||||
|
// request's Error value. Always returns true to continue iterating
|
||||||
|
// request handlers in a HandlerList.
|
||||||
|
func HandlerListLogItem(item HandlerListRunItem) bool {
|
||||||
|
if item.Request.Config.Logger == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
|
||||||
|
item.Index, item.Handler.Name, item.Request.Error)
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerListStopOnError returns false to stop the HandlerList iterating
|
||||||
|
// over request handlers if Request.Error is not nil. True otherwise
|
||||||
|
// to continue iterating.
|
||||||
|
func HandlerListStopOnError(item HandlerListRunItem) bool {
|
||||||
|
return item.Request.Error == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
||||||
|
// header. If the extra parameters are provided they will be added as metadata to the
|
||||||
|
// name/version pair resulting in the following format.
|
||||||
|
// "name/version (extra0; extra1; ...)"
|
||||||
|
// The user agent part will be concatenated with this current request's user agent string.
|
||||||
|
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
|
||||||
|
ua := fmt.Sprintf("%s/%s", name, version)
|
||||||
|
if len(extra) > 0 {
|
||||||
|
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
|
||||||
|
}
|
||||||
|
return func(r *Request) {
|
||||||
|
AddToUserAgent(r, ua)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
|
||||||
|
// The input string will be concatenated with the current request's user agent string.
|
||||||
|
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
|
||||||
|
return func(r *Request) {
|
||||||
|
AddToUserAgent(r, s)
|
||||||
|
}
|
||||||
|
}
|
24
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
24
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
|
||||||
|
req := new(http.Request)
|
||||||
|
*req = *r
|
||||||
|
req.URL = &url.URL{}
|
||||||
|
*req.URL = *r.URL
|
||||||
|
req.Body = body
|
||||||
|
|
||||||
|
req.Header = http.Header{}
|
||||||
|
for k, v := range r.Header {
|
||||||
|
for _, vv := range v {
|
||||||
|
req.Header.Add(k, vv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// offsetReader is a thread-safe io.ReadCloser to prevent racing
|
||||||
|
// with retrying requests
|
||||||
|
type offsetReader struct {
|
||||||
|
buf io.ReadSeeker
|
||||||
|
lock sync.RWMutex
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
|
||||||
|
reader := &offsetReader{}
|
||||||
|
buf.Seek(offset, 0)
|
||||||
|
|
||||||
|
reader.buf = buf
|
||||||
|
return reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is a thread-safe close. Uses the write lock.
|
||||||
|
func (o *offsetReader) Close() error {
|
||||||
|
o.lock.Lock()
|
||||||
|
defer o.lock.Unlock()
|
||||||
|
o.closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read is a thread-safe read using a read lock.
|
||||||
|
func (o *offsetReader) Read(p []byte) (int, error) {
|
||||||
|
o.lock.RLock()
|
||||||
|
defer o.lock.RUnlock()
|
||||||
|
|
||||||
|
if o.closed {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.buf.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
|
||||||
|
// and close the old buffer.
|
||||||
|
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
|
||||||
|
o.Close()
|
||||||
|
return newOffsetReader(o.buf, offset)
|
||||||
|
}
|
326
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
Normal file
326
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
Normal file
|
@ -0,0 +1,326 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Request is the service request to be made.
|
||||||
|
type Request struct {
|
||||||
|
Config aws.Config
|
||||||
|
ClientInfo metadata.ClientInfo
|
||||||
|
Handlers Handlers
|
||||||
|
|
||||||
|
Retryer
|
||||||
|
Time time.Time
|
||||||
|
ExpireTime time.Duration
|
||||||
|
Operation *Operation
|
||||||
|
HTTPRequest *http.Request
|
||||||
|
HTTPResponse *http.Response
|
||||||
|
Body io.ReadSeeker
|
||||||
|
BodyStart int64 // offset from beginning of Body that the request body starts
|
||||||
|
Params interface{}
|
||||||
|
Error error
|
||||||
|
Data interface{}
|
||||||
|
RequestID string
|
||||||
|
RetryCount int
|
||||||
|
Retryable *bool
|
||||||
|
RetryDelay time.Duration
|
||||||
|
NotHoist bool
|
||||||
|
SignedHeaderVals http.Header
|
||||||
|
LastSignedAt time.Time
|
||||||
|
|
||||||
|
built bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Operation is the service API operation to be made.
|
||||||
|
type Operation struct {
|
||||||
|
Name string
|
||||||
|
HTTPMethod string
|
||||||
|
HTTPPath string
|
||||||
|
*Paginator
|
||||||
|
}
|
||||||
|
|
||||||
|
// Paginator keeps track of pagination configuration for an API operation.
|
||||||
|
type Paginator struct {
|
||||||
|
InputTokens []string
|
||||||
|
OutputTokens []string
|
||||||
|
LimitToken string
|
||||||
|
TruncationToken string
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Request pointer for the service API
|
||||||
|
// operation and parameters.
|
||||||
|
//
|
||||||
|
// Params is any value of input parameters to be the request payload.
|
||||||
|
// Data is pointer value to an object which the request's response
|
||||||
|
// payload will be deserialized to.
|
||||||
|
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
||||||
|
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
|
||||||
|
|
||||||
|
method := operation.HTTPMethod
|
||||||
|
if method == "" {
|
||||||
|
method = "POST"
|
||||||
|
}
|
||||||
|
|
||||||
|
httpReq, _ := http.NewRequest(method, "", nil)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
|
||||||
|
if err != nil {
|
||||||
|
httpReq.URL = &url.URL{}
|
||||||
|
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &Request{
|
||||||
|
Config: cfg,
|
||||||
|
ClientInfo: clientInfo,
|
||||||
|
Handlers: handlers.Copy(),
|
||||||
|
|
||||||
|
Retryer: retryer,
|
||||||
|
Time: time.Now(),
|
||||||
|
ExpireTime: 0,
|
||||||
|
Operation: operation,
|
||||||
|
HTTPRequest: httpReq,
|
||||||
|
Body: nil,
|
||||||
|
Params: params,
|
||||||
|
Error: err,
|
||||||
|
Data: data,
|
||||||
|
}
|
||||||
|
r.SetBufferBody([]byte{})
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// WillRetry returns if the request's can be retried.
|
||||||
|
func (r *Request) WillRetry() bool {
|
||||||
|
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParamsFilled returns if the request's parameters have been populated
|
||||||
|
// and the parameters are valid. False is returned if no parameters are
|
||||||
|
// provided or invalid.
|
||||||
|
func (r *Request) ParamsFilled() bool {
|
||||||
|
return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataFilled returns true if the request's data for response deserialization
|
||||||
|
// target has been set and is a valid. False is returned if data is not
|
||||||
|
// set, or is invalid.
|
||||||
|
func (r *Request) DataFilled() bool {
|
||||||
|
return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBufferBody will set the request's body bytes that will be sent to
|
||||||
|
// the service API.
|
||||||
|
func (r *Request) SetBufferBody(buf []byte) {
|
||||||
|
r.SetReaderBody(bytes.NewReader(buf))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStringBody sets the body of the request to be backed by a string.
|
||||||
|
func (r *Request) SetStringBody(s string) {
|
||||||
|
r.SetReaderBody(strings.NewReader(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReaderBody will set the request's body reader.
|
||||||
|
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||||
|
r.HTTPRequest.Body = newOffsetReader(reader, 0)
|
||||||
|
r.Body = reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Presign returns the request's signed URL. Error will be returned
|
||||||
|
// if the signing fails.
|
||||||
|
func (r *Request) Presign(expireTime time.Duration) (string, error) {
|
||||||
|
r.ExpireTime = expireTime
|
||||||
|
r.NotHoist = false
|
||||||
|
r.Sign()
|
||||||
|
if r.Error != nil {
|
||||||
|
return "", r.Error
|
||||||
|
}
|
||||||
|
return r.HTTPRequest.URL.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PresignRequest behaves just like presign, but hoists all headers and signs them.
|
||||||
|
// Also returns the signed hash back to the user
|
||||||
|
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
|
||||||
|
r.ExpireTime = expireTime
|
||||||
|
r.NotHoist = true
|
||||||
|
r.Sign()
|
||||||
|
if r.Error != nil {
|
||||||
|
return "", nil, r.Error
|
||||||
|
}
|
||||||
|
return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
||||||
|
if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
retryStr := "not retrying"
|
||||||
|
if retrying {
|
||||||
|
retryStr = "will retry"
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
|
||||||
|
stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build will build the request's object so it can be signed and sent
|
||||||
|
// to the service. Build will also validate all the request's parameters.
|
||||||
|
// Anny additional build Handlers set on this request will be run
|
||||||
|
// in the order they were set.
|
||||||
|
//
|
||||||
|
// The request will only be built once. Multiple calls to build will have
|
||||||
|
// no effect.
|
||||||
|
//
|
||||||
|
// If any Validate or Build errors occur the build will stop and the error
|
||||||
|
// which occurred will be returned.
|
||||||
|
func (r *Request) Build() error {
|
||||||
|
if !r.built {
|
||||||
|
r.Handlers.Validate.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Validate Request", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
r.Handlers.Build.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Build Request", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
r.built = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign will sign the request returning error if errors are encountered.
|
||||||
|
//
|
||||||
|
// Send will build the request prior to signing. All Sign Handlers will
|
||||||
|
// be executed in the order they were set.
|
||||||
|
func (r *Request) Sign() error {
|
||||||
|
r.Build()
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Build Request", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Handlers.Sign.Run(r)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send will send the request returning error if errors are encountered.
|
||||||
|
//
|
||||||
|
// Send will sign the request prior to sending. All Send Handlers will
|
||||||
|
// be executed in the order they were set.
|
||||||
|
//
|
||||||
|
// Canceling a request is non-deterministic. If a request has been canceled,
|
||||||
|
// then the transport will choose, randomly, one of the state channels during
|
||||||
|
// reads or getting the connection.
|
||||||
|
//
|
||||||
|
// readLoop() and getConn(req *Request, cm connectMethod)
|
||||||
|
// https://github.com/golang/go/blob/master/src/net/http/transport.go
|
||||||
|
func (r *Request) Send() error {
|
||||||
|
for {
|
||||||
|
if aws.BoolValue(r.Retryable) {
|
||||||
|
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
||||||
|
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
||||||
|
}
|
||||||
|
|
||||||
|
var body io.ReadCloser
|
||||||
|
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
|
||||||
|
body = reader.CloseAndCopy(r.BodyStart)
|
||||||
|
} else {
|
||||||
|
if r.Config.Logger != nil {
|
||||||
|
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
|
||||||
|
}
|
||||||
|
r.Body.Seek(r.BodyStart, 0)
|
||||||
|
body = ioutil.NopCloser(r.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
|
||||||
|
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
||||||
|
// Closing response body. Since we are setting a new request to send off, this
|
||||||
|
// response will get squashed and leaked.
|
||||||
|
r.HTTPResponse.Body.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Sign()
|
||||||
|
if r.Error != nil {
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Retryable = nil
|
||||||
|
|
||||||
|
r.Handlers.Send.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
if strings.Contains(r.Error.Error(), "net/http: request canceled") {
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
err := r.Error
|
||||||
|
r.Handlers.Retry.Run(r)
|
||||||
|
r.Handlers.AfterRetry.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Send Request", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
debugLogReqError(r, "Send Request", true, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Handlers.UnmarshalMeta.Run(r)
|
||||||
|
r.Handlers.ValidateResponse.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
err := r.Error
|
||||||
|
r.Handlers.UnmarshalError.Run(r)
|
||||||
|
r.Handlers.Retry.Run(r)
|
||||||
|
r.Handlers.AfterRetry.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Validate Response", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
debugLogReqError(r, "Validate Response", true, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Handlers.Unmarshal.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
err := r.Error
|
||||||
|
r.Handlers.Retry.Run(r)
|
||||||
|
r.Handlers.AfterRetry.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Unmarshal Response", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
debugLogReqError(r, "Unmarshal Response", true, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddToUserAgent adds the string to the end of the request's current user agent.
|
||||||
|
func AddToUserAgent(r *Request, s string) {
|
||||||
|
curUA := r.HTTPRequest.Header.Get("User-Agent")
|
||||||
|
if len(curUA) > 0 {
|
||||||
|
s = curUA + " " + s
|
||||||
|
}
|
||||||
|
r.HTTPRequest.Header.Set("User-Agent", s)
|
||||||
|
}
|
104
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
generated
vendored
Normal file
104
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
generated
vendored
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
//type Paginater interface {
|
||||||
|
// HasNextPage() bool
|
||||||
|
// NextPage() *Request
|
||||||
|
// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
|
||||||
|
//}
|
||||||
|
|
||||||
|
// HasNextPage returns true if this request has more pages of data available.
|
||||||
|
func (r *Request) HasNextPage() bool {
|
||||||
|
return len(r.nextPageTokens()) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextPageTokens returns the tokens to use when asking for the next page of
|
||||||
|
// data.
|
||||||
|
func (r *Request) nextPageTokens() []interface{} {
|
||||||
|
if r.Operation.Paginator == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Operation.TruncationToken != "" {
|
||||||
|
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
|
||||||
|
if len(tr) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := tr[0].(type) {
|
||||||
|
case *bool:
|
||||||
|
if !aws.BoolValue(v) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case bool:
|
||||||
|
if v == false {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens := []interface{}{}
|
||||||
|
tokenAdded := false
|
||||||
|
for _, outToken := range r.Operation.OutputTokens {
|
||||||
|
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
|
||||||
|
if len(v) > 0 {
|
||||||
|
tokens = append(tokens, v[0])
|
||||||
|
tokenAdded = true
|
||||||
|
} else {
|
||||||
|
tokens = append(tokens, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !tokenAdded {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextPage returns a new Request that can be executed to return the next
|
||||||
|
// page of result data. Call .Send() on this request to execute it.
|
||||||
|
func (r *Request) NextPage() *Request {
|
||||||
|
tokens := r.nextPageTokens()
|
||||||
|
if len(tokens) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
|
||||||
|
nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
|
||||||
|
for i, intok := range nr.Operation.InputTokens {
|
||||||
|
awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
|
||||||
|
}
|
||||||
|
return nr
|
||||||
|
}
|
||||||
|
|
||||||
|
// EachPage iterates over each page of a paginated request object. The fn
|
||||||
|
// parameter should be a function with the following sample signature:
|
||||||
|
//
|
||||||
|
// func(page *T, lastPage bool) bool {
|
||||||
|
// return true // return false to stop iterating
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Where "T" is the structure type matching the output structure of the given
|
||||||
|
// operation. For example, a request object generated by
|
||||||
|
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
|
||||||
|
// as the structure "T". The lastPage value represents whether the page is
|
||||||
|
// the last page of data or not. The return value of this function should
|
||||||
|
// return true to keep iterating or false to stop.
|
||||||
|
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
|
||||||
|
for page := r; page != nil; page = page.NextPage() {
|
||||||
|
if err := page.Send(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
|
||||||
|
return page.Error
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
101
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
Normal file
101
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
Normal file
|
@ -0,0 +1,101 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Retryer is an interface to control retry logic for a given service.
|
||||||
|
// The default implementation used by most services is the service.DefaultRetryer
|
||||||
|
// structure, which contains basic retry logic using exponential backoff.
|
||||||
|
type Retryer interface {
|
||||||
|
RetryRules(*Request) time.Duration
|
||||||
|
ShouldRetry(*Request) bool
|
||||||
|
MaxRetries() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetryer sets a config Retryer value to the given Config returning it
|
||||||
|
// for chaining.
|
||||||
|
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
||||||
|
cfg.Retryer = retryer
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryableCodes is a collection of service response codes which are retry-able
|
||||||
|
// without any further action.
|
||||||
|
var retryableCodes = map[string]struct{}{
|
||||||
|
"RequestError": {},
|
||||||
|
"RequestTimeout": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
var throttleCodes = map[string]struct{}{
|
||||||
|
"ProvisionedThroughputExceededException": {},
|
||||||
|
"Throttling": {},
|
||||||
|
"ThrottlingException": {},
|
||||||
|
"RequestLimitExceeded": {},
|
||||||
|
"RequestThrottled": {},
|
||||||
|
"LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
|
||||||
|
"TooManyRequestsException": {}, // Lambda functions
|
||||||
|
}
|
||||||
|
|
||||||
|
// credsExpiredCodes is a collection of error codes which signify the credentials
|
||||||
|
// need to be refreshed. Expired tokens require refreshing of credentials, and
|
||||||
|
// resigning before the request can be retried.
|
||||||
|
var credsExpiredCodes = map[string]struct{}{
|
||||||
|
"ExpiredToken": {},
|
||||||
|
"ExpiredTokenException": {},
|
||||||
|
"RequestExpired": {}, // EC2 Only
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCodeThrottle(code string) bool {
|
||||||
|
_, ok := throttleCodes[code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCodeRetryable(code string) bool {
|
||||||
|
if _, ok := retryableCodes[code]; ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return isCodeExpiredCreds(code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCodeExpiredCreds(code string) bool {
|
||||||
|
_, ok := credsExpiredCodes[code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
||||||
|
// Returns false if the request has no Error set.
|
||||||
|
func (r *Request) IsErrorRetryable() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeRetryable(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||||
|
// Returns false if the request has no Error set
|
||||||
|
func (r *Request) IsErrorThrottle() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeThrottle(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorExpired returns whether the error code is a credential expiry error.
|
||||||
|
// Returns false if the request has no Error set.
|
||||||
|
func (r *Request) IsErrorExpired() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeExpiredCreds(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
Normal file
234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
Normal file
|
@ -0,0 +1,234 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidParameterErrCode is the error code for invalid parameters errors
|
||||||
|
InvalidParameterErrCode = "InvalidParameter"
|
||||||
|
// ParamRequiredErrCode is the error code for required parameter errors
|
||||||
|
ParamRequiredErrCode = "ParamRequiredError"
|
||||||
|
// ParamMinValueErrCode is the error code for fields with too low of a
|
||||||
|
// number value.
|
||||||
|
ParamMinValueErrCode = "ParamMinValueError"
|
||||||
|
// ParamMinLenErrCode is the error code for fields without enough elements.
|
||||||
|
ParamMinLenErrCode = "ParamMinLenError"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validator provides a way for types to perform validation logic on their
|
||||||
|
// input values that external code can use to determine if a type's values
|
||||||
|
// are valid.
|
||||||
|
type Validator interface {
|
||||||
|
Validate() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
|
||||||
|
// validating API operation input parameters.
|
||||||
|
type ErrInvalidParams struct {
|
||||||
|
// Context is the base context of the invalid parameter group.
|
||||||
|
Context string
|
||||||
|
errs []ErrInvalidParam
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a new invalid parameter error to the collection of invalid
|
||||||
|
// parameters. The context of the invalid parameter will be updated to reflect
|
||||||
|
// this collection.
|
||||||
|
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
|
||||||
|
err.SetContext(e.Context)
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNested adds the invalid parameter errors from another ErrInvalidParams
|
||||||
|
// value into this collection. The nested errors will have their nested context
|
||||||
|
// updated and base context to reflect the merging.
|
||||||
|
//
|
||||||
|
// Use for nested validations errors.
|
||||||
|
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
|
||||||
|
for _, err := range nested.errs {
|
||||||
|
err.SetContext(e.Context)
|
||||||
|
err.AddNestedContext(nestedCtx)
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of invalid parameter errors
|
||||||
|
func (e ErrInvalidParams) Len() int {
|
||||||
|
return len(e.errs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the code of the error
|
||||||
|
func (e ErrInvalidParams) Code() string {
|
||||||
|
return InvalidParameterErrCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the message of the error
|
||||||
|
func (e ErrInvalidParams) Message() string {
|
||||||
|
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string formatted form of the invalid parameters.
|
||||||
|
func (e ErrInvalidParams) Error() string {
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
|
||||||
|
|
||||||
|
for _, err := range e.errs {
|
||||||
|
fmt.Fprintf(w, "- %s\n", err.Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
|
||||||
|
func (e ErrInvalidParams) OrigErr() error {
|
||||||
|
return awserr.NewBatchError(
|
||||||
|
InvalidParameterErrCode, e.Message(), e.OrigErrs())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErrs returns a slice of the invalid parameters
|
||||||
|
func (e ErrInvalidParams) OrigErrs() []error {
|
||||||
|
errs := make([]error, len(e.errs))
|
||||||
|
for i := 0; i < len(errs); i++ {
|
||||||
|
errs[i] = e.errs[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrInvalidParam represents an invalid parameter error type.
|
||||||
|
type ErrInvalidParam interface {
|
||||||
|
awserr.Error
|
||||||
|
|
||||||
|
// Field name the error occurred on.
|
||||||
|
Field() string
|
||||||
|
|
||||||
|
// SetContext updates the context of the error.
|
||||||
|
SetContext(string)
|
||||||
|
|
||||||
|
// AddNestedContext updates the error's context to include a nested level.
|
||||||
|
AddNestedContext(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
type errInvalidParam struct {
|
||||||
|
context string
|
||||||
|
nestedContext string
|
||||||
|
field string
|
||||||
|
code string
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the error code for the type of invalid parameter.
|
||||||
|
func (e *errInvalidParam) Code() string {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the reason the parameter was invalid, and its context.
|
||||||
|
func (e *errInvalidParam) Message() string {
|
||||||
|
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string version of the invalid parameter error.
|
||||||
|
func (e *errInvalidParam) Error() string {
|
||||||
|
return fmt.Sprintf("%s: %s", e.code, e.Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErr returns nil, Implemented for awserr.Error interface.
|
||||||
|
func (e *errInvalidParam) OrigErr() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field Returns the field and context the error occurred.
|
||||||
|
func (e *errInvalidParam) Field() string {
|
||||||
|
field := e.context
|
||||||
|
if len(field) > 0 {
|
||||||
|
field += "."
|
||||||
|
}
|
||||||
|
if len(e.nestedContext) > 0 {
|
||||||
|
field += fmt.Sprintf("%s.", e.nestedContext)
|
||||||
|
}
|
||||||
|
field += e.field
|
||||||
|
|
||||||
|
return field
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContext updates the base context of the error.
|
||||||
|
func (e *errInvalidParam) SetContext(ctx string) {
|
||||||
|
e.context = ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNestedContext prepends a context to the field's path.
|
||||||
|
func (e *errInvalidParam) AddNestedContext(ctx string) {
|
||||||
|
if len(e.nestedContext) == 0 {
|
||||||
|
e.nestedContext = ctx
|
||||||
|
} else {
|
||||||
|
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamRequired represents an required parameter error.
|
||||||
|
type ErrParamRequired struct {
|
||||||
|
errInvalidParam
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamRequired creates a new required parameter error.
|
||||||
|
func NewErrParamRequired(field string) *ErrParamRequired {
|
||||||
|
return &ErrParamRequired{
|
||||||
|
errInvalidParam{
|
||||||
|
code: ParamRequiredErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("missing required field"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamMinValue represents a minimum value parameter error.
|
||||||
|
type ErrParamMinValue struct {
|
||||||
|
errInvalidParam
|
||||||
|
min float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamMinValue creates a new minimum value parameter error.
|
||||||
|
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
|
||||||
|
return &ErrParamMinValue{
|
||||||
|
errInvalidParam: errInvalidParam{
|
||||||
|
code: ParamMinValueErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("minimum field value of %v", min),
|
||||||
|
},
|
||||||
|
min: min,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinValue returns the field's require minimum value.
|
||||||
|
//
|
||||||
|
// float64 is returned for both int and float min values.
|
||||||
|
func (e *ErrParamMinValue) MinValue() float64 {
|
||||||
|
return e.min
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamMinLen represents a minimum length parameter error.
|
||||||
|
type ErrParamMinLen struct {
|
||||||
|
errInvalidParam
|
||||||
|
min int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamMinLen creates a new minimum length parameter error.
|
||||||
|
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
|
||||||
|
return &ErrParamMinLen{
|
||||||
|
errInvalidParam: errInvalidParam{
|
||||||
|
code: ParamMinValueErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("minimum field size of %v", min),
|
||||||
|
},
|
||||||
|
min: min,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinLen returns the field's required minimum length.
|
||||||
|
func (e *ErrParamMinLen) MinLen() int {
|
||||||
|
return e.min
|
||||||
|
}
|
223
vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
generated
vendored
Normal file
223
vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
||||||
|
/*
|
||||||
|
Package session provides configuration for the SDK's service clients.
|
||||||
|
|
||||||
|
Sessions can be shared across all service clients that share the same base
|
||||||
|
configuration. The Session is built from the SDK's default configuration and
|
||||||
|
request handlers.
|
||||||
|
|
||||||
|
Sessions should be cached when possible, because creating a new Session will
|
||||||
|
load all configuration values from the environment, and config files each time
|
||||||
|
the Session is created. Sharing the Session value across all of your service
|
||||||
|
clients will ensure the configuration is loaded the fewest number of times possible.
|
||||||
|
|
||||||
|
Concurrency
|
||||||
|
|
||||||
|
Sessions are safe to use concurrently as long as the Session is not being
|
||||||
|
modified. The SDK will not modify the Session once the Session has been created.
|
||||||
|
Creating service clients concurrently from a shared Session is safe.
|
||||||
|
|
||||||
|
Sessions from Shared Config
|
||||||
|
|
||||||
|
Sessions can be created using the method above that will only load the
|
||||||
|
additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
|
||||||
|
Alternatively you can explicitly create a Session with shared config enabled.
|
||||||
|
To do this you can use NewSessionWithOptions to configure how the Session will
|
||||||
|
be created. Using the NewSessionWithOptions with SharedConfigState set to
|
||||||
|
SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
|
||||||
|
environment variable was set.
|
||||||
|
|
||||||
|
Creating Sessions
|
||||||
|
|
||||||
|
When creating Sessions optional aws.Config values can be passed in that will
|
||||||
|
override the default, or loaded config values the Session is being created
|
||||||
|
with. This allows you to provide additional, or case based, configuration
|
||||||
|
as needed.
|
||||||
|
|
||||||
|
By default NewSession will only load credentials from the shared credentials
|
||||||
|
file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
|
||||||
|
set to a truthy value the Session will be created from the configuration
|
||||||
|
values from the shared config (~/.aws/config) and shared credentials
|
||||||
|
(~/.aws/credentials) files. See the section Sessions from Shared Config for
|
||||||
|
more information.
|
||||||
|
|
||||||
|
Create a Session with the default config and request handlers. With credentials
|
||||||
|
region, and profile loaded from the environment and shared config automatically.
|
||||||
|
Requires the AWS_PROFILE to be set, or "default" is used.
|
||||||
|
|
||||||
|
// Create Session
|
||||||
|
sess, err := session.NewSession()
|
||||||
|
|
||||||
|
// Create a Session with a custom region
|
||||||
|
sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
|
||||||
|
|
||||||
|
// Create a S3 client instance from a session
|
||||||
|
sess, err := session.NewSession()
|
||||||
|
if err != nil {
|
||||||
|
// Handle Session creation error
|
||||||
|
}
|
||||||
|
svc := s3.New(sess)
|
||||||
|
|
||||||
|
Create Session With Option Overrides
|
||||||
|
|
||||||
|
In addition to NewSession, Sessions can be created using NewSessionWithOptions.
|
||||||
|
This func allows you to control and override how the Session will be created
|
||||||
|
through code instead of being driven by environment variables only.
|
||||||
|
|
||||||
|
Use NewSessionWithOptions when you want to provide the config profile, or
|
||||||
|
override the shared config state (AWS_SDK_LOAD_CONFIG).
|
||||||
|
|
||||||
|
// Equivalent to session.New
|
||||||
|
sess, err := session.NewSessionWithOptions(session.Options{})
|
||||||
|
|
||||||
|
// Specify profile to load for the session's config
|
||||||
|
sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
Profile: "profile_name",
|
||||||
|
})
|
||||||
|
|
||||||
|
// Specify profile for config and region for requests
|
||||||
|
sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
Config: aws.Config{Region: aws.String("us-east-1")},
|
||||||
|
Profile: "profile_name",
|
||||||
|
})
|
||||||
|
|
||||||
|
// Force enable Shared Config support
|
||||||
|
sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
SharedConfigState: SharedConfigEnable,
|
||||||
|
})
|
||||||
|
|
||||||
|
Adding Handlers
|
||||||
|
|
||||||
|
You can add handlers to a session for processing HTTP requests. All service
|
||||||
|
clients that use the session inherit the handlers. For example, the following
|
||||||
|
handler logs every request and its payload made by a service client:
|
||||||
|
|
||||||
|
// Create a session, and add additional handlers for all service
|
||||||
|
// clients created with the Session to inherit. Adds logging handler.
|
||||||
|
sess, err := session.NewSession()
|
||||||
|
sess.Handlers.Send.PushFront(func(r *request.Request) {
|
||||||
|
// Log every request made and its payload
|
||||||
|
logger.Println("Request: %s/%s, Payload: %s",
|
||||||
|
r.ClientInfo.ServiceName, r.Operation, r.Params)
|
||||||
|
})
|
||||||
|
|
||||||
|
Deprecated "New" function
|
||||||
|
|
||||||
|
The New session function has been deprecated because it does not provide good
|
||||||
|
way to return errors that occur when loading the configuration files and values.
|
||||||
|
Because of this, NewSession was created so errors can be retrieved when
|
||||||
|
creating a session fails.
|
||||||
|
|
||||||
|
Shared Config Fields
|
||||||
|
|
||||||
|
By default the SDK will only load the shared credentials file's (~/.aws/credentials)
|
||||||
|
credentials values, and all other config is provided by the environment variables,
|
||||||
|
SDK defaults, and user provided aws.Config values.
|
||||||
|
|
||||||
|
If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
|
||||||
|
option is used to create the Session the full shared config values will be
|
||||||
|
loaded. This includes credentials, region, and support for assume role. In
|
||||||
|
addition the Session will load its configuration from both the shared config
|
||||||
|
file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
|
||||||
|
files have the same format.
|
||||||
|
|
||||||
|
If both config files are present the configuration from both files will be
|
||||||
|
read. The Session will be created from configuration values from the shared
|
||||||
|
credentials file (~/.aws/credentials) over those in the shared credentials
|
||||||
|
file (~/.aws/config).
|
||||||
|
|
||||||
|
Credentials are the values the SDK should use for authenticating requests with
|
||||||
|
AWS Services. They arfrom a configuration file will need to include both
|
||||||
|
aws_access_key_id and aws_secret_access_key must be provided together in the
|
||||||
|
same file to be considered valid. The values will be ignored if not a complete
|
||||||
|
group. aws_session_token is an optional field that can be provided if both of
|
||||||
|
the other two fields are also provided.
|
||||||
|
|
||||||
|
aws_access_key_id = AKID
|
||||||
|
aws_secret_access_key = SECRET
|
||||||
|
aws_session_token = TOKEN
|
||||||
|
|
||||||
|
Assume Role values allow you to configure the SDK to assume an IAM role using
|
||||||
|
a set of credentials provided in a config file via the source_profile field.
|
||||||
|
Both "role_arn" and "source_profile" are required. The SDK does not support
|
||||||
|
assuming a role with MFA token Via the Session's constructor. You can use the
|
||||||
|
stscreds.AssumeRoleProvider credentials provider to specify custom
|
||||||
|
configuration and support for MFA.
|
||||||
|
|
||||||
|
role_arn = arn:aws:iam::<account_number>:role/<role_name>
|
||||||
|
source_profile = profile_with_creds
|
||||||
|
external_id = 1234
|
||||||
|
mfa_serial = not supported!
|
||||||
|
role_session_name = session_name
|
||||||
|
|
||||||
|
Region is the region the SDK should use for looking up AWS service endpoints
|
||||||
|
and signing requests.
|
||||||
|
|
||||||
|
region = us-east-1
|
||||||
|
|
||||||
|
Environment Variables
|
||||||
|
|
||||||
|
When a Session is created several environment variables can be set to adjust
|
||||||
|
how the SDK functions, and what configuration data it loads when creating
|
||||||
|
Sessions. All environment values are optional, but some values like credentials
|
||||||
|
require multiple of the values to set or the partial values will be ignored.
|
||||||
|
All environment variable values are strings unless otherwise noted.
|
||||||
|
|
||||||
|
Environment configuration values. If set both Access Key ID and Secret Access
|
||||||
|
Key must be provided. Session Token and optionally also be provided, but is
|
||||||
|
not required.
|
||||||
|
|
||||||
|
# Access Key ID
|
||||||
|
AWS_ACCESS_KEY_ID=AKID
|
||||||
|
AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
|
||||||
|
|
||||||
|
# Secret Access Key
|
||||||
|
AWS_SECRET_ACCESS_KEY=SECRET
|
||||||
|
AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
|
||||||
|
|
||||||
|
# Session Token
|
||||||
|
AWS_SESSION_TOKEN=TOKEN
|
||||||
|
|
||||||
|
Region value will instruct the SDK where to make service API requests to. If is
|
||||||
|
not provided in the environment the region must be provided before a service
|
||||||
|
client request is made.
|
||||||
|
|
||||||
|
AWS_REGION=us-east-1
|
||||||
|
|
||||||
|
# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
|
||||||
|
# and AWS_REGION is not also set.
|
||||||
|
AWS_DEFAULT_REGION=us-east-1
|
||||||
|
|
||||||
|
Profile name the SDK should load use when loading shared config from the
|
||||||
|
configuration files. If not provided "default" will be used as the profile name.
|
||||||
|
|
||||||
|
AWS_PROFILE=my_profile
|
||||||
|
|
||||||
|
# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
|
||||||
|
# and AWS_PROFILE is not also set.
|
||||||
|
AWS_DEFAULT_PROFILE=my_profile
|
||||||
|
|
||||||
|
SDK load config instructs the SDK to load the shared config in addition to
|
||||||
|
shared credentials. This also expands the configuration loaded so the shared
|
||||||
|
credentials will have parity with the shared config file. This also enables
|
||||||
|
Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
|
||||||
|
env values as well.
|
||||||
|
|
||||||
|
AWS_SDK_LOAD_CONFIG=1
|
||||||
|
|
||||||
|
Shared credentials file path can be set to instruct the SDK to use an alternative
|
||||||
|
file for the shared credentials. If not set the file will be loaded from
|
||||||
|
$HOME/.aws/credentials on Linux/Unix based systems, and
|
||||||
|
%USERPROFILE%\.aws\credentials on Windows.
|
||||||
|
|
||||||
|
AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
|
||||||
|
|
||||||
|
Shared config file path can be set to instruct the SDK to use an alternative
|
||||||
|
file for the shared config. If not set the file will be loaded from
|
||||||
|
$HOME/.aws/config on Linux/Unix based systems, and
|
||||||
|
%USERPROFILE%\.aws\config on Windows.
|
||||||
|
|
||||||
|
AWS_CONFIG_FILE=$HOME/my_shared_config
|
||||||
|
|
||||||
|
|
||||||
|
*/
|
||||||
|
package session
|
188
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
Normal file
188
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
||||||
|
package session
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// envConfig is a collection of environment values the SDK will read
|
||||||
|
// setup config from. All environment values are optional. But some values
|
||||||
|
// such as credentials require multiple values to be complete or the values
|
||||||
|
// will be ignored.
|
||||||
|
type envConfig struct {
|
||||||
|
// Environment configuration values. If set both Access Key ID and Secret Access
|
||||||
|
// Key must be provided. Session Token and optionally also be provided, but is
|
||||||
|
// not required.
|
||||||
|
//
|
||||||
|
// # Access Key ID
|
||||||
|
// AWS_ACCESS_KEY_ID=AKID
|
||||||
|
// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
|
||||||
|
//
|
||||||
|
// # Secret Access Key
|
||||||
|
// AWS_SECRET_ACCESS_KEY=SECRET
|
||||||
|
// AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
|
||||||
|
//
|
||||||
|
// # Session Token
|
||||||
|
// AWS_SESSION_TOKEN=TOKEN
|
||||||
|
Creds credentials.Value
|
||||||
|
|
||||||
|
// Region value will instruct the SDK where to make service API requests to. If is
|
||||||
|
// not provided in the environment the region must be provided before a service
|
||||||
|
// client request is made.
|
||||||
|
//
|
||||||
|
// AWS_REGION=us-east-1
|
||||||
|
//
|
||||||
|
// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
|
||||||
|
// # and AWS_REGION is not also set.
|
||||||
|
// AWS_DEFAULT_REGION=us-east-1
|
||||||
|
Region string
|
||||||
|
|
||||||
|
// Profile name the SDK should load use when loading shared configuration from the
|
||||||
|
// shared configuration files. If not provided "default" will be used as the
|
||||||
|
// profile name.
|
||||||
|
//
|
||||||
|
// AWS_PROFILE=my_profile
|
||||||
|
//
|
||||||
|
// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
|
||||||
|
// # and AWS_PROFILE is not also set.
|
||||||
|
// AWS_DEFAULT_PROFILE=my_profile
|
||||||
|
Profile string
|
||||||
|
|
||||||
|
// SDK load config instructs the SDK to load the shared config in addition to
|
||||||
|
// shared credentials. This also expands the configuration loaded from the shared
|
||||||
|
// credentials to have parity with the shared config file. This also enables
|
||||||
|
// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
|
||||||
|
// env values as well.
|
||||||
|
//
|
||||||
|
// AWS_SDK_LOAD_CONFIG=1
|
||||||
|
EnableSharedConfig bool
|
||||||
|
|
||||||
|
// Shared credentials file path can be set to instruct the SDK to use an alternate
|
||||||
|
// file for the shared credentials. If not set the file will be loaded from
|
||||||
|
// $HOME/.aws/credentials on Linux/Unix based systems, and
|
||||||
|
// %USERPROFILE%\.aws\credentials on Windows.
|
||||||
|
//
|
||||||
|
// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
|
||||||
|
SharedCredentialsFile string
|
||||||
|
|
||||||
|
// Shared config file path can be set to instruct the SDK to use an alternate
|
||||||
|
// file for the shared config. If not set the file will be loaded from
|
||||||
|
// $HOME/.aws/config on Linux/Unix based systems, and
|
||||||
|
// %USERPROFILE%\.aws\config on Windows.
|
||||||
|
//
|
||||||
|
// AWS_CONFIG_FILE=$HOME/my_shared_config
|
||||||
|
SharedConfigFile string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
credAccessEnvKey = []string{
|
||||||
|
"AWS_ACCESS_KEY_ID",
|
||||||
|
"AWS_ACCESS_KEY",
|
||||||
|
}
|
||||||
|
credSecretEnvKey = []string{
|
||||||
|
"AWS_SECRET_ACCESS_KEY",
|
||||||
|
"AWS_SECRET_KEY",
|
||||||
|
}
|
||||||
|
credSessionEnvKey = []string{
|
||||||
|
"AWS_SESSION_TOKEN",
|
||||||
|
}
|
||||||
|
|
||||||
|
regionEnvKeys = []string{
|
||||||
|
"AWS_REGION",
|
||||||
|
"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
|
||||||
|
}
|
||||||
|
profileEnvKeys = []string{
|
||||||
|
"AWS_PROFILE",
|
||||||
|
"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// loadEnvConfig retrieves the SDK's environment configuration.
|
||||||
|
// See `envConfig` for the values that will be retrieved.
|
||||||
|
//
|
||||||
|
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
|
||||||
|
// the shared SDK config will be loaded in addition to the SDK's specific
|
||||||
|
// configuration values.
|
||||||
|
func loadEnvConfig() envConfig {
|
||||||
|
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
|
||||||
|
return envConfigLoad(enableSharedConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
|
||||||
|
// SDK shared config. See `envConfig` for the values that will be retrieved.
|
||||||
|
//
|
||||||
|
// Loads the shared configuration in addition to the SDK's specific configuration.
|
||||||
|
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
|
||||||
|
// environment variable is set.
|
||||||
|
func loadSharedEnvConfig() envConfig {
|
||||||
|
return envConfigLoad(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func envConfigLoad(enableSharedConfig bool) envConfig {
|
||||||
|
cfg := envConfig{}
|
||||||
|
|
||||||
|
cfg.EnableSharedConfig = enableSharedConfig
|
||||||
|
|
||||||
|
setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
|
||||||
|
setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
|
||||||
|
setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
|
||||||
|
|
||||||
|
// Require logical grouping of credentials
|
||||||
|
if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
|
||||||
|
cfg.Creds = credentials.Value{}
|
||||||
|
} else {
|
||||||
|
cfg.Creds.ProviderName = "EnvConfigCredentials"
|
||||||
|
}
|
||||||
|
|
||||||
|
regionKeys := regionEnvKeys
|
||||||
|
profileKeys := profileEnvKeys
|
||||||
|
if !cfg.EnableSharedConfig {
|
||||||
|
regionKeys = regionKeys[:1]
|
||||||
|
profileKeys = profileKeys[:1]
|
||||||
|
}
|
||||||
|
|
||||||
|
setFromEnvVal(&cfg.Region, regionKeys)
|
||||||
|
setFromEnvVal(&cfg.Profile, profileKeys)
|
||||||
|
|
||||||
|
cfg.SharedCredentialsFile = sharedCredentialsFilename()
|
||||||
|
cfg.SharedConfigFile = sharedConfigFilename()
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func setFromEnvVal(dst *string, keys []string) {
|
||||||
|
for _, k := range keys {
|
||||||
|
if v := os.Getenv(k); len(v) > 0 {
|
||||||
|
*dst = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sharedCredentialsFilename() string {
|
||||||
|
if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Join(userHomeDir(), ".aws", "credentials")
|
||||||
|
}
|
||||||
|
|
||||||
|
func sharedConfigFilename() string {
|
||||||
|
if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Join(userHomeDir(), ".aws", "config")
|
||||||
|
}
|
||||||
|
|
||||||
|
func userHomeDir() string {
|
||||||
|
homeDir := os.Getenv("HOME") // *nix
|
||||||
|
if len(homeDir) == 0 { // windows
|
||||||
|
homeDir = os.Getenv("USERPROFILE")
|
||||||
|
}
|
||||||
|
|
||||||
|
return homeDir
|
||||||
|
}
|
393
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
Normal file
393
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
Normal file
|
@ -0,0 +1,393 @@
|
||||||
|
package session
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/defaults"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/private/endpoints"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Session provides a central location to create service clients from and
|
||||||
|
// store configurations and request handlers for those services.
|
||||||
|
//
|
||||||
|
// Sessions are safe to create service clients concurrently, but it is not safe
|
||||||
|
// to mutate the Session concurrently.
|
||||||
|
//
|
||||||
|
// The Session satisfies the service client's client.ClientConfigProvider.
|
||||||
|
type Session struct {
|
||||||
|
Config *aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the handlers merging in the provided configs
|
||||||
|
// on top of the SDK's default configurations. Once the Session is created it
|
||||||
|
// can be mutated to modify the Config or Handlers. The Session is safe to be
|
||||||
|
// read concurrently, but it should not be written to concurrently.
|
||||||
|
//
|
||||||
|
// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
|
||||||
|
// method could now encounter an error when loading the configuration. When
|
||||||
|
// The environment variable is set, and an error occurs, New will return a
|
||||||
|
// session that will fail all requests reporting the error that occured while
|
||||||
|
// loading the session. Use NewSession to get the error when creating the
|
||||||
|
// session.
|
||||||
|
//
|
||||||
|
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
||||||
|
// the shared config file (~/.aws/config) will also be loaded, in addition to
|
||||||
|
// the shared credentials file (~/.aws/config). Values set in both the
|
||||||
|
// shared config, and shared credentials will be taken from the shared
|
||||||
|
// credentials file.
|
||||||
|
//
|
||||||
|
// Deprecated: Use NewSession functiions to create sessions instead. NewSession
|
||||||
|
// has the same functionality as New except an error can be returned when the
|
||||||
|
// func is called instead of waiting to receive an error until a request is made.
|
||||||
|
func New(cfgs ...*aws.Config) *Session {
|
||||||
|
// load initial config from environment
|
||||||
|
envCfg := loadEnvConfig()
|
||||||
|
|
||||||
|
if envCfg.EnableSharedConfig {
|
||||||
|
s, err := newSession(envCfg, cfgs...)
|
||||||
|
if err != nil {
|
||||||
|
// Old session.New expected all errors to be discovered when
|
||||||
|
// a request is made, and would report the errors then. This
|
||||||
|
// needs to be replicated if an error occurs while creating
|
||||||
|
// the session.
|
||||||
|
msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
|
||||||
|
"Use session.NewSession to handle errors occuring during session creation."
|
||||||
|
|
||||||
|
// Session creation failed, need to report the error and prevent
|
||||||
|
// any requests from succeeding.
|
||||||
|
s = &Session{Config: defaults.Config()}
|
||||||
|
s.Config.MergeIn(cfgs...)
|
||||||
|
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
|
||||||
|
s.Handlers.Validate.PushBack(func(r *request.Request) {
|
||||||
|
r.Error = err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
return oldNewSession(cfgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSession returns a new Session created from SDK defaults, config files,
|
||||||
|
// environment, and user provided config files. Once the Session is created
|
||||||
|
// it can be mutated to modify the Config or Handlers. The Session is safe to
|
||||||
|
// be read concurrently, but it should not be written to concurrently.
|
||||||
|
//
|
||||||
|
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
||||||
|
// the shared config file (~/.aws/config) will also be loaded in addition to
|
||||||
|
// the shared credentials file (~/.aws/config). Values set in both the
|
||||||
|
// shared config, and shared credentials will be taken from the shared
|
||||||
|
// credentials file. Enabling the Shared Config will also allow the Session
|
||||||
|
// to be built with retrieving credentials with AssumeRole set in the config.
|
||||||
|
//
|
||||||
|
// See the NewSessionWithOptions func for information on how to override or
|
||||||
|
// control through code how the Session will be created. Such as specifing the
|
||||||
|
// config profile, and controlling if shared config is enabled or not.
|
||||||
|
func NewSession(cfgs ...*aws.Config) (*Session, error) {
|
||||||
|
envCfg := loadEnvConfig()
|
||||||
|
|
||||||
|
return newSession(envCfg, cfgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SharedConfigState provides the ability to optionally override the state
|
||||||
|
// of the session's creation based on the shared config being enabled or
|
||||||
|
// disabled.
|
||||||
|
type SharedConfigState int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// SharedConfigStateFromEnv does not override any state of the
|
||||||
|
// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
|
||||||
|
// SharedConfigState type.
|
||||||
|
SharedConfigStateFromEnv SharedConfigState = iota
|
||||||
|
|
||||||
|
// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
|
||||||
|
// and disables the shared config functionality.
|
||||||
|
SharedConfigDisable
|
||||||
|
|
||||||
|
// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
|
||||||
|
// and enables the shared config functionality.
|
||||||
|
SharedConfigEnable
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options provides the means to control how a Session is created and what
|
||||||
|
// configuration values will be loaded.
|
||||||
|
//
|
||||||
|
type Options struct {
|
||||||
|
// Provides config values for the SDK to use when creating service clients
|
||||||
|
// and making API requests to services. Any value set in with this field
|
||||||
|
// will override the associated value provided by the SDK defaults,
|
||||||
|
// environment or config files where relevent.
|
||||||
|
//
|
||||||
|
// If not set, configuration values from from SDK defaults, environment,
|
||||||
|
// config will be used.
|
||||||
|
Config aws.Config
|
||||||
|
|
||||||
|
// Overrides the config profile the Session should be created from. If not
|
||||||
|
// set the value of the environment variable will be loaded (AWS_PROFILE,
|
||||||
|
// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
|
||||||
|
//
|
||||||
|
// If not set and environment variables are not set the "default"
|
||||||
|
// (DefaultSharedConfigProfile) will be used as the profile to load the
|
||||||
|
// session config from.
|
||||||
|
Profile string
|
||||||
|
|
||||||
|
// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
|
||||||
|
// environment variable. By default a Session will be created using the
|
||||||
|
// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
|
||||||
|
//
|
||||||
|
// Setting this value to SharedConfigEnable or SharedConfigDisable
|
||||||
|
// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
|
||||||
|
// and enable or disable the shared config functionality.
|
||||||
|
SharedConfigState SharedConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
|
||||||
|
// environment, and user provided config files. This func uses the Options
|
||||||
|
// values to configure how the Session is created.
|
||||||
|
//
|
||||||
|
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
||||||
|
// the shared config file (~/.aws/config) will also be loaded in addition to
|
||||||
|
// the shared credentials file (~/.aws/config). Values set in both the
|
||||||
|
// shared config, and shared credentials will be taken from the shared
|
||||||
|
// credentials file. Enabling the Shared Config will also allow the Session
|
||||||
|
// to be built with retrieving credentials with AssumeRole set in the config.
|
||||||
|
//
|
||||||
|
// // Equivalent to session.New
|
||||||
|
// sess, err := session.NewSessionWithOptions(session.Options{})
|
||||||
|
//
|
||||||
|
// // Specify profile to load for the session's config
|
||||||
|
// sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
// Profile: "profile_name",
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Specify profile for config and region for requests
|
||||||
|
// sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
// Config: aws.Config{Region: aws.String("us-east-1")},
|
||||||
|
// Profile: "profile_name",
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Force enable Shared Config support
|
||||||
|
// sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
|
// SharedConfigState: SharedConfigEnable,
|
||||||
|
// })
|
||||||
|
func NewSessionWithOptions(opts Options) (*Session, error) {
|
||||||
|
var envCfg envConfig
|
||||||
|
if opts.SharedConfigState == SharedConfigEnable {
|
||||||
|
envCfg = loadSharedEnvConfig()
|
||||||
|
} else {
|
||||||
|
envCfg = loadEnvConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(opts.Profile) > 0 {
|
||||||
|
envCfg.Profile = opts.Profile
|
||||||
|
}
|
||||||
|
|
||||||
|
switch opts.SharedConfigState {
|
||||||
|
case SharedConfigDisable:
|
||||||
|
envCfg.EnableSharedConfig = false
|
||||||
|
case SharedConfigEnable:
|
||||||
|
envCfg.EnableSharedConfig = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return newSession(envCfg, &opts.Config)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must is a helper function to ensure the Session is valid and there was no
|
||||||
|
// error when calling a NewSession function.
|
||||||
|
//
|
||||||
|
// This helper is intended to be used in variable initialization to load the
|
||||||
|
// Session and configuration at startup. Such as:
|
||||||
|
//
|
||||||
|
// var sess = session.Must(session.NewSession())
|
||||||
|
func Must(sess *Session, err error) *Session {
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sess
|
||||||
|
}
|
||||||
|
|
||||||
|
func oldNewSession(cfgs ...*aws.Config) *Session {
|
||||||
|
cfg := defaults.Config()
|
||||||
|
handlers := defaults.Handlers()
|
||||||
|
|
||||||
|
// Apply the passed in configs so the configuration can be applied to the
|
||||||
|
// default credential chain
|
||||||
|
cfg.MergeIn(cfgs...)
|
||||||
|
cfg.Credentials = defaults.CredChain(cfg, handlers)
|
||||||
|
|
||||||
|
// Reapply any passed in configs to override credentials if set
|
||||||
|
cfg.MergeIn(cfgs...)
|
||||||
|
|
||||||
|
s := &Session{
|
||||||
|
Config: cfg,
|
||||||
|
Handlers: handlers,
|
||||||
|
}
|
||||||
|
|
||||||
|
initHandlers(s)
|
||||||
|
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
|
||||||
|
cfg := defaults.Config()
|
||||||
|
handlers := defaults.Handlers()
|
||||||
|
|
||||||
|
// Get a merged version of the user provided config to determine if
|
||||||
|
// credentials were.
|
||||||
|
userCfg := &aws.Config{}
|
||||||
|
userCfg.MergeIn(cfgs...)
|
||||||
|
|
||||||
|
// Order config files will be loaded in with later files overwriting
|
||||||
|
// previous config file values.
|
||||||
|
cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
|
||||||
|
if !envCfg.EnableSharedConfig {
|
||||||
|
// The shared config file (~/.aws/config) is only loaded if instructed
|
||||||
|
// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
|
||||||
|
cfgFiles = cfgFiles[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load additional config from file(s)
|
||||||
|
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
|
||||||
|
|
||||||
|
s := &Session{
|
||||||
|
Config: cfg,
|
||||||
|
Handlers: handlers,
|
||||||
|
}
|
||||||
|
|
||||||
|
initHandlers(s)
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mergeConfigSrcs merges configuration from the user supplied config, the
// process environment, and the shared config files into cfg. Precedence is
// user config, then environment, then shared config. handlers is used when
// an assume-role credential provider needs to make STS requests.
func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
	// Merge in user provided configuration
	cfg.MergeIn(userCfg)

	// Region if not already set by user
	if len(aws.StringValue(cfg.Region)) == 0 {
		if len(envCfg.Region) > 0 {
			cfg.WithRegion(envCfg.Region)
		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
			cfg.WithRegion(sharedCfg.Region)
		}
	}

	// Configure credentials if not already set by the user's config.
	if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
		if len(envCfg.Creds.AccessKeyID) > 0 {
			// Environment credentials take precedence over shared config.
			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
				envCfg.Creds,
			)
		} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
			// Assume-role profile: build an STS credentials provider whose
			// source credentials come from the profile named by
			// source_profile. A copy of cfg is used so the static source
			// credentials do not replace cfg's own credentials field.
			cfgCp := *cfg
			cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
				sharedCfg.AssumeRoleSource.Creds,
			)
			cfg.Credentials = stscreds.NewCredentials(
				&Session{
					Config:   &cfgCp,
					Handlers: handlers.Copy(),
				},
				sharedCfg.AssumeRole.RoleARN,
				func(opt *stscreds.AssumeRoleProvider) {
					opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName

					if len(sharedCfg.AssumeRole.ExternalID) > 0 {
						opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
					}

					// MFA not supported
				},
			)
		} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
			// Static credentials from the shared config/credentials files.
			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
				sharedCfg.Creds,
			)
		} else {
			// Fallback to default credentials provider, include mock errors
			// for the credential chain so user can identify why credentials
			// failed to be retrieved.
			cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
				VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
				Providers: []credentials.Provider{
					&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
					&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
					defaults.RemoteCredProvider(*cfg, handlers),
				},
			})
		}
	}
}
|
||||||
|
|
||||||
|
// credProviderError is a stub credentials.Provider that always fails with a
// fixed error. It is placed in the credential chain so a user can see why
// each earlier credential source was unavailable.
type credProviderError struct {
	Err error // the fixed error returned from every Retrieve call
}

// emptyCreds is the zero credentials value.
// NOTE(review): not referenced in the code visible here — presumably used
// for comparisons elsewhere in this package; confirm before removing.
var emptyCreds = credentials.Value{}

// Retrieve always fails, returning empty credentials and the stored error.
func (c credProviderError) Retrieve() (credentials.Value, error) {
	return credentials.Value{}, c.Err
}

// IsExpired always reports true so the chain never treats this provider's
// (nonexistent) credentials as cached.
func (c credProviderError) IsExpired() bool {
	return true
}
|
||||||
|
|
||||||
|
func initHandlers(s *Session) {
|
||||||
|
// Add the Validate parameter handler if it is not disabled.
|
||||||
|
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
|
||||||
|
if !aws.BoolValue(s.Config.DisableParamValidation) {
|
||||||
|
s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy creates and returns a copy of the current Session, coping the config
|
||||||
|
// and handlers. If any additional configs are provided they will be merged
|
||||||
|
// on top of the Session's copied config.
|
||||||
|
//
|
||||||
|
// // Create a copy of the current Session, configured for the us-west-2 region.
|
||||||
|
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
|
||||||
|
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
||||||
|
newSession := &Session{
|
||||||
|
Config: s.Config.Copy(cfgs...),
|
||||||
|
Handlers: s.Handlers.Copy(),
|
||||||
|
}
|
||||||
|
|
||||||
|
initHandlers(newSession)
|
||||||
|
|
||||||
|
return newSession
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientConfig satisfies the client.ConfigProvider interface and is used to
|
||||||
|
// configure the service client instances. Passing the Session to the service
|
||||||
|
// client's constructor (New) will use this method to configure the client.
|
||||||
|
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
|
||||||
|
s = s.Copy(cfgs...)
|
||||||
|
endpoint, signingRegion := endpoints.NormalizeEndpoint(
|
||||||
|
aws.StringValue(s.Config.Endpoint),
|
||||||
|
serviceName,
|
||||||
|
aws.StringValue(s.Config.Region),
|
||||||
|
aws.BoolValue(s.Config.DisableSSL),
|
||||||
|
aws.BoolValue(s.Config.UseDualStack),
|
||||||
|
)
|
||||||
|
|
||||||
|
return client.Config{
|
||||||
|
Config: s.Config,
|
||||||
|
Handlers: s.Handlers,
|
||||||
|
Endpoint: endpoint,
|
||||||
|
SigningRegion: signingRegion,
|
||||||
|
}
|
||||||
|
}
|
294
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
Normal file
294
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
Normal file
|
@ -0,0 +1,294 @@
|
||||||
|
package session
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/go-ini/ini"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Static Credentials group
|
||||||
|
accessKeyIDKey = `aws_access_key_id` // group required
|
||||||
|
secretAccessKey = `aws_secret_access_key` // group required
|
||||||
|
sessionTokenKey = `aws_session_token` // optional
|
||||||
|
|
||||||
|
// Assume Role Credentials group
|
||||||
|
roleArnKey = `role_arn` // group required
|
||||||
|
sourceProfileKey = `source_profile` // group required
|
||||||
|
externalIDKey = `external_id` // optional
|
||||||
|
mfaSerialKey = `mfa_serial` // optional
|
||||||
|
roleSessionNameKey = `role_session_name` // optional
|
||||||
|
|
||||||
|
// Additional Config fields
|
||||||
|
regionKey = `region`
|
||||||
|
|
||||||
|
// DefaultSharedConfigProfile is the default profile to be used when
|
||||||
|
// loading configuration from the config files if another profile name
|
||||||
|
// is not provided.
|
||||||
|
DefaultSharedConfigProfile = `default`
|
||||||
|
)
|
||||||
|
|
||||||
|
type assumeRoleConfig struct {
|
||||||
|
RoleARN string
|
||||||
|
SourceProfile string
|
||||||
|
ExternalID string
|
||||||
|
MFASerial string
|
||||||
|
RoleSessionName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// sharedConfig represents the configuration fields of the SDK config files.
|
||||||
|
type sharedConfig struct {
|
||||||
|
// Credentials values from the config file. Both aws_access_key_id
|
||||||
|
// and aws_secret_access_key must be provided together in the same file
|
||||||
|
// to be considered valid. The values will be ignored if not a complete group.
|
||||||
|
// aws_session_token is an optional field that can be provided if both of the
|
||||||
|
// other two fields are also provided.
|
||||||
|
//
|
||||||
|
// aws_access_key_id
|
||||||
|
// aws_secret_access_key
|
||||||
|
// aws_session_token
|
||||||
|
Creds credentials.Value
|
||||||
|
|
||||||
|
AssumeRole assumeRoleConfig
|
||||||
|
AssumeRoleSource *sharedConfig
|
||||||
|
|
||||||
|
// Region is the region the SDK should use for looking up AWS service endpoints
|
||||||
|
// and signing requests.
|
||||||
|
//
|
||||||
|
// region
|
||||||
|
Region string
|
||||||
|
}
|
||||||
|
|
||||||
|
type sharedConfigFile struct {
|
||||||
|
Filename string
|
||||||
|
IniData *ini.File
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadSharedConfig retrieves the configuration from the list of files
// using the profile provided. The order the files are listed will determine
// precedence. Values in subsequent files will overwrite values defined in
// earlier files.
//
// For example, given two files A and B. Both define credentials. If the order
// of the files are A then B, B's credential values will be used instead of A's.
//
// If profile is empty, DefaultSharedConfigProfile ("default") is used.
//
// See sharedConfig.setFromIniFile for information how the config files
// will be loaded.
func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
	if len(profile) == 0 {
		profile = DefaultSharedConfigProfile
	}

	files, err := loadSharedConfigIniFiles(filenames)
	if err != nil {
		return sharedConfig{}, err
	}

	cfg := sharedConfig{}
	if err = cfg.setFromIniFiles(profile, files); err != nil {
		return sharedConfig{}, err
	}

	// If the profile assumes a role, resolve the source profile's
	// credentials as well.
	if len(cfg.AssumeRole.SourceProfile) > 0 {
		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
			return sharedConfig{}, err
		}
	}

	return cfg, nil
}
|
||||||
|
|
||||||
|
func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
|
||||||
|
files := make([]sharedConfigFile, 0, len(filenames))
|
||||||
|
|
||||||
|
for _, filename := range filenames {
|
||||||
|
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||||
|
// Trim files from the list that don't exist.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := ini.Load(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, SharedConfigLoadError{Filename: filename}
|
||||||
|
}
|
||||||
|
|
||||||
|
files = append(files, sharedConfigFile{
|
||||||
|
Filename: filename, IniData: f,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setAssumeRoleSource resolves the credentials of the profile named by
// source_profile and stores them in cfg.AssumeRoleSource. Returns a
// SharedConfigAssumeRoleError if the source profile carries no static
// credentials.
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
	var assumeRoleSrc sharedConfig

	// Multiple level assume role chains are not supported: a profile that
	// sources itself reuses its own values with the assume-role settings
	// stripped rather than recursing.
	if cfg.AssumeRole.SourceProfile == origProfile {
		assumeRoleSrc = *cfg
		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
	} else {
		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
		if err != nil {
			return err
		}
	}

	// The source profile must provide static credentials for the role to
	// be assumable.
	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
	}

	cfg.AssumeRoleSource = &assumeRoleSrc

	return nil
}
|
||||||
|
|
||||||
|
// setFromIniFiles applies the named profile from each file in order. Later
// files overwrite values set by earlier ones. Files in which the profile
// does not exist are skipped; any other error aborts the load.
func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
	for _, f := range files {
		if err := cfg.setFromIniFile(profile, f); err != nil {
			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
				// Ignore profiles missing from individual files.
				continue
			}
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// setFromIniFile loads the configuration from the file using
// the profile provided. A sharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
// for incomplete grouped values in the config. Such as credentials. For example
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
	section, err := file.IniData.GetSection(profile)
	if err != nil {
		// Fallback to the alternate profile section name "profile <name>"
		// used by the shared config file (~/.aws/config).
		section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
		if err != nil {
			return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
		}
	}

	// Shared Credentials: only applied when the access key id and secret
	// are present together.
	akid := section.Key(accessKeyIDKey).String()
	secret := section.Key(secretAccessKey).String()
	if len(akid) > 0 && len(secret) > 0 {
		cfg.Creds = credentials.Value{
			AccessKeyID:     akid,
			SecretAccessKey: secret,
			SessionToken:    section.Key(sessionTokenKey).String(),
			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
		}
	}

	// Assume Role: only applied when role_arn and source_profile are
	// present together.
	roleArn := section.Key(roleArnKey).String()
	srcProfile := section.Key(sourceProfileKey).String()
	if len(roleArn) > 0 && len(srcProfile) > 0 {
		cfg.AssumeRole = assumeRoleConfig{
			RoleARN:         roleArn,
			SourceProfile:   srcProfile,
			ExternalID:      section.Key(externalIDKey).String(),
			MFASerial:       section.Key(mfaSerialKey).String(),
			RoleSessionName: section.Key(roleSessionNameKey).String(),
		}
	}

	// Region
	if v := section.Key(regionKey).String(); len(v) > 0 {
		cfg.Region = v
	}

	return nil
}
|
||||||
|
|
||||||
|
// SharedConfigLoadError is an error for when the shared config file fails
// to load.
type SharedConfigLoadError struct {
	Filename string // path of the config file that failed to load
	Err      error  // underlying cause; may be nil
}

// Code is the short id of the error.
func (e SharedConfigLoadError) Code() string {
	return "SharedConfigLoadError"
}

// Message is the description of the error.
func (e SharedConfigLoadError) Message() string {
	return fmt.Sprintf("failed to load config file, %s", e.Filename)
}

// OrigErr is the underlying error that caused the failure.
func (e SharedConfigLoadError) OrigErr() error {
	return e.Err
}

// Error satisfies the error interface.
func (e SharedConfigLoadError) Error() string {
	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
}
|
||||||
|
|
||||||
|
// SharedConfigProfileNotExistsError is an error for the shared config when
// the profile was not found in the config file.
type SharedConfigProfileNotExistsError struct {
	Profile string // name of the profile that was not found
	Err     error  // underlying lookup error; may be nil
}

// Code is the short id of the error.
func (e SharedConfigProfileNotExistsError) Code() string {
	return "SharedConfigProfileNotExistsError"
}

// Message is the description of the error.
func (e SharedConfigProfileNotExistsError) Message() string {
	return fmt.Sprintf("failed to get profile, %s", e.Profile)
}

// OrigErr is the underlying error that caused the failure.
func (e SharedConfigProfileNotExistsError) OrigErr() error {
	return e.Err
}

// Error satisfies the error interface.
func (e SharedConfigProfileNotExistsError) Error() string {
	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
}
|
||||||
|
|
||||||
|
// SharedConfigAssumeRoleError is an error for the shared config when the
// profile contains assume role information, but that information is invalid
// or not complete.
type SharedConfigAssumeRoleError struct {
	RoleARN string // the role ARN whose source profile lacked credentials
}

// Code is the short id of the error.
func (e SharedConfigAssumeRoleError) Code() string {
	return "SharedConfigAssumeRoleError"
}

// Message is the description of the error.
func (e SharedConfigAssumeRoleError) Message() string {
	return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
		e.RoleARN)
}

// OrigErr is the underlying error that caused the failure.
// There is never an underlying error for this condition, so it returns nil.
func (e SharedConfigAssumeRoleError) OrigErr() error {
	return nil
}

// Error satisfies the error interface.
func (e SharedConfigAssumeRoleError) Error() string {
	return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
|
82
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
generated
vendored
Normal file
82
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rules houses a set of rule values used to validate a string value.
type rules []rule

// rule checks whether or not a value adheres to a single validation rule.
type rule interface {
	IsValid(value string) bool
}

// IsValid reports whether any rule in the set (including nested rule sets)
// accepts the value.
func (r rules) IsValid(value string) bool {
	for i := range r {
		if r[i].IsValid(value) {
			return true
		}
	}
	return false
}

// mapRule is a generic rule for set-membership tests.
type mapRule map[string]struct{}

// IsValid reports whether the value exists as a key in the map.
func (m mapRule) IsValid(value string) bool {
	if _, found := m[value]; found {
		return true
	}
	return false
}

// whitelist is a generic rule for whitelisting.
type whitelist struct {
	rule
}

// IsValid reports whether the embedded rule accepts the value.
func (w whitelist) IsValid(value string) bool {
	return w.rule.IsValid(value)
}

// blacklist is a generic rule for blacklisting.
type blacklist struct {
	rule
}

// IsValid reports the inverse of the embedded rule: the value is valid
// only when the embedded rule does NOT match it.
func (b blacklist) IsValid(value string) bool {
	return !b.rule.IsValid(value)
}

// patterns is a list of HTTP-header name prefixes.
type patterns []string

// IsValid reports whether the canonicalized header name starts with any
// of the prefixes in p.
func (p patterns) IsValid(value string) bool {
	canonical := http.CanonicalHeaderKey(value)
	for _, prefix := range p {
		if strings.HasPrefix(canonical, prefix) {
			return true
		}
	}
	return false
}

// inclusiveRules allows rules to depend on one another: a value is valid
// only when every rule in the set accepts it.
type inclusiveRules []rule

// IsValid reports whether all rules accept the value.
func (r inclusiveRules) IsValid(value string) bool {
	for i := range r {
		if !r[i].IsValid(value) {
			return false
		}
	}
	return true
}
|
665
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
Normal file
665
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
Normal file
|
@ -0,0 +1,665 @@
|
||||||
|
// Package v4 implements signing for AWS V4 signer
|
||||||
|
//
|
||||||
|
// Provides request signing for request that need to be signed with
|
||||||
|
// AWS V4 Signatures.
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||||
|
timeFormat = "20060102T150405Z"
|
||||||
|
shortTimeFormat = "20060102"
|
||||||
|
|
||||||
|
// emptyStringSHA256 is a SHA256 of an empty string
|
||||||
|
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
|
||||||
|
)
|
||||||
|
|
||||||
|
var ignoredHeaders = rules{
|
||||||
|
blacklist{
|
||||||
|
mapRule{
|
||||||
|
"Authorization": struct{}{},
|
||||||
|
"User-Agent": struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// requiredSignedHeaders is a whitelist for build canonical headers.
|
||||||
|
var requiredSignedHeaders = rules{
|
||||||
|
whitelist{
|
||||||
|
mapRule{
|
||||||
|
"Cache-Control": struct{}{},
|
||||||
|
"Content-Disposition": struct{}{},
|
||||||
|
"Content-Encoding": struct{}{},
|
||||||
|
"Content-Language": struct{}{},
|
||||||
|
"Content-Md5": struct{}{},
|
||||||
|
"Content-Type": struct{}{},
|
||||||
|
"Expires": struct{}{},
|
||||||
|
"If-Match": struct{}{},
|
||||||
|
"If-Modified-Since": struct{}{},
|
||||||
|
"If-None-Match": struct{}{},
|
||||||
|
"If-Unmodified-Since": struct{}{},
|
||||||
|
"Range": struct{}{},
|
||||||
|
"X-Amz-Acl": struct{}{},
|
||||||
|
"X-Amz-Copy-Source": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Range": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||||
|
"X-Amz-Grant-Full-control": struct{}{},
|
||||||
|
"X-Amz-Grant-Read": struct{}{},
|
||||||
|
"X-Amz-Grant-Read-Acp": struct{}{},
|
||||||
|
"X-Amz-Grant-Write": struct{}{},
|
||||||
|
"X-Amz-Grant-Write-Acp": struct{}{},
|
||||||
|
"X-Amz-Metadata-Directive": struct{}{},
|
||||||
|
"X-Amz-Mfa": struct{}{},
|
||||||
|
"X-Amz-Request-Payer": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||||
|
"X-Amz-Storage-Class": struct{}{},
|
||||||
|
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
patterns{"X-Amz-Meta-"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// allowedHoisting is a whitelist for build query headers. The boolean value
|
||||||
|
// represents whether or not it is a pattern.
|
||||||
|
var allowedQueryHoisting = inclusiveRules{
|
||||||
|
blacklist{requiredSignedHeaders},
|
||||||
|
patterns{"X-Amz-"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signer applies AWS v4 signing to given request. Use this to sign requests
|
||||||
|
// that need to be signed with AWS V4 Signatures.
|
||||||
|
type Signer struct {
|
||||||
|
// The authentication credentials the request will be signed against.
|
||||||
|
// This value must be set to sign requests.
|
||||||
|
Credentials *credentials.Credentials
|
||||||
|
|
||||||
|
// Sets the log level the signer should use when reporting information to
|
||||||
|
// the logger. If the logger is nil nothing will be logged. See
|
||||||
|
// aws.LogLevelType for more information on available logging levels
|
||||||
|
//
|
||||||
|
// By default nothing will be logged.
|
||||||
|
Debug aws.LogLevelType
|
||||||
|
|
||||||
|
// The logger loging information will be written to. If there the logger
|
||||||
|
// is nil, nothing will be logged.
|
||||||
|
Logger aws.Logger
|
||||||
|
|
||||||
|
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
|
||||||
|
// request header to the request's query string. This is most commonly used
|
||||||
|
// with pre-signed requests preventing headers from being added to the
|
||||||
|
// request's query string.
|
||||||
|
DisableHeaderHoisting bool
|
||||||
|
|
||||||
|
// currentTimeFn returns the time value which represents the current time.
|
||||||
|
// This value should only be used for testing. If it is nil the default
|
||||||
|
// time.Now will be used.
|
||||||
|
currentTimeFn func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSigner returns a Signer pointer configured with the credentials and optional
|
||||||
|
// option values provided. If not options are provided the Signer will use its
|
||||||
|
// default configuration.
|
||||||
|
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
||||||
|
v4 := &Signer{
|
||||||
|
Credentials: credentials,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(v4)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v4
|
||||||
|
}
|
||||||
|
|
||||||
|
type signingCtx struct {
|
||||||
|
ServiceName string
|
||||||
|
Region string
|
||||||
|
Request *http.Request
|
||||||
|
Body io.ReadSeeker
|
||||||
|
Query url.Values
|
||||||
|
Time time.Time
|
||||||
|
ExpireTime time.Duration
|
||||||
|
SignedHeaderVals http.Header
|
||||||
|
|
||||||
|
credValues credentials.Value
|
||||||
|
isPresign bool
|
||||||
|
formattedTime string
|
||||||
|
formattedShortTime string
|
||||||
|
|
||||||
|
bodyDigest string
|
||||||
|
signedHeaders string
|
||||||
|
canonicalHeaders string
|
||||||
|
canonicalString string
|
||||||
|
credentialString string
|
||||||
|
stringToSign string
|
||||||
|
signature string
|
||||||
|
authorization string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign signs AWS v4 requests with the provided body, service name, region the
|
||||||
|
// request is made to, and time the request is signed at. The signTime allows
|
||||||
|
// you to specify that a request is signed for the future, and cannot be
|
||||||
|
// used until then.
|
||||||
|
//
|
||||||
|
// Returns a list of HTTP headers that were included in the signature or an
|
||||||
|
// error if signing the request failed. Generally for signed requests this value
|
||||||
|
// is not needed as the full request context will be captured by the http.Request
|
||||||
|
// value. It is included for reference though.
|
||||||
|
//
|
||||||
|
// Sign will set the request's Body to be the `body` parameter passed in. If
|
||||||
|
// the body is not already an io.ReadCloser, it will be wrapped within one. If
|
||||||
|
// a `nil` body parameter passed to Sign, the request's Body field will be
|
||||||
|
// also set to nil. Its important to note that this functionality will not
|
||||||
|
// change the request's ContentLength of the request.
|
||||||
|
//
|
||||||
|
// Sign differs from Presign in that it will sign the request using HTTP
|
||||||
|
// header values. This type of signing is intended for http.Request values that
|
||||||
|
// will not be shared, or are shared in a way the header values on the request
|
||||||
|
// will not be lost.
|
||||||
|
//
|
||||||
|
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||||
|
// generated. To bypass the signer computing the hash you can set the
|
||||||
|
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||||
|
// only compute the hash if the request header value is empty.
|
||||||
|
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
|
||||||
|
return v4.signWithBody(r, body, service, region, 0, signTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Presign signs AWS v4 requests with the provided body, service name, region
|
||||||
|
// the request is made to, and time the request is signed at. The signTime
|
||||||
|
// allows you to specify that a request is signed for the future, and cannot
|
||||||
|
// be used until then.
|
||||||
|
//
|
||||||
|
// Returns a list of HTTP headers that were included in the signature or an
|
||||||
|
// error if signing the request failed. For presigned requests these headers
|
||||||
|
// and their values must be included on the HTTP request when it is made. This
|
||||||
|
// is helpful to know what header values need to be shared with the party the
|
||||||
|
// presigned request will be distributed to.
|
||||||
|
//
|
||||||
|
// Presign differs from Sign in that it will sign the request using query string
|
||||||
|
// instead of header values. This allows you to share the Presigned Request's
|
||||||
|
// URL with third parties, or distribute it throughout your system with minimal
|
||||||
|
// dependencies.
|
||||||
|
//
|
||||||
|
// Presign also takes an exp value which is the duration the
|
||||||
|
// signed request will be valid after the signing time. This is allows you to
|
||||||
|
// set when the request will expire.
|
||||||
|
//
|
||||||
|
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||||
|
// generated. To bypass the signer computing the hash you can set the
|
||||||
|
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||||
|
// only compute the hash if the request header value is empty.
|
||||||
|
//
|
||||||
|
// Presigning a S3 request will not compute the body's SHA256 hash by default.
|
||||||
|
// This is done due to the general use case for S3 presigned URLs is to share
|
||||||
|
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
|
||||||
|
// presigned request's signature you can set the "X-Amz-Content-Sha256"
|
||||||
|
// HTTP header and that will be included in the request's signature.
|
||||||
|
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||||
|
return v4.signWithBody(r, body, service, region, exp, signTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// signWithBody is the shared implementation behind Sign and Presign. A
// non-zero exp selects presigned-URL mode. It returns the headers that were
// included in the signature so callers can re-sign consistently on retry.
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
	currentTimeFn := v4.currentTimeFn
	if currentTimeFn == nil {
		currentTimeFn = time.Now
	}

	ctx := &signingCtx{
		Request:     r,
		Body:        body,
		Query:       r.URL.Query(),
		Time:        signTime,
		ExpireTime:  exp,
		isPresign:   exp != 0,
		ServiceName: service,
		Region:      region,
	}

	if ctx.isRequestSigned() {
		if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
			// If the request is already signed, and the credentials have not
			// expired, and the request is not too old ignore the signing request.
			return ctx.SignedHeaderVals, nil
		}
		// Existing signature is stale: reset the clock and strip the old
		// presign query parameters before re-signing.
		ctx.Time = currentTimeFn()
		ctx.handlePresignRemoval()
	}

	var err error
	ctx.credValues, err = v4.Credentials.Get()
	if err != nil {
		return http.Header{}, err
	}

	ctx.assignAmzQueryValues()
	ctx.build(v4.DisableHeaderHoisting)

	// If the request is not presigned the body should be attached to it. This
	// prevents the confusion of wanting to send a signed request without
	// the body the request was signed for attached.
	if !ctx.isPresign {
		var reader io.ReadCloser
		if body != nil {
			var ok bool
			if reader, ok = body.(io.ReadCloser); !ok {
				reader = ioutil.NopCloser(body)
			}
		}
		r.Body = reader
	}

	if v4.Debug.Matches(aws.LogDebugWithSigning) {
		v4.logSigningInfo(ctx)
	}

	return ctx.SignedHeaderVals, nil
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) handlePresignRemoval() {
|
||||||
|
if !ctx.isPresign {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// The credentials have expired for this request. The current signing
|
||||||
|
// is invalid, and needs to be request because the request will fail.
|
||||||
|
ctx.removePresign()
|
||||||
|
|
||||||
|
// Update the request's query string to ensure the values stays in
|
||||||
|
// sync in the case retrieving the new credentials fails.
|
||||||
|
ctx.Request.URL.RawQuery = ctx.Query.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) assignAmzQueryValues() {
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||||
|
if ctx.credValues.SessionToken != "" {
|
||||||
|
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||||
|
} else {
|
||||||
|
ctx.Query.Del("X-Amz-Security-Token")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.credValues.SessionToken != "" {
|
||||||
|
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignRequestHandler is a named request handler the SDK will use to sign
// service client request with using the V4 signature.
// It is installed in a client's handler list and delegates to SignSDKRequest.
var SignRequestHandler = request.NamedHandler{
	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
|
||||||
|
|
||||||
|
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler is best used only with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
	// Delegate with the real clock; tests inject their own time function.
	signSDKRequestWithCurrTime(req, time.Now)
}
|
||||||
|
// signSDKRequestWithCurrTime signs an SDK request using curTimeFn as the
// clock. It resolves the signing region and name from the client info
// (falling back to the config region / service name), builds a Signer that
// mirrors the request's config, and records the signed headers and signing
// time on the request.
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
	// If the request does not need to be signed ignore the signing of the
	// request if the AnonymousCredentials object is used.
	if req.Config.Credentials == credentials.AnonymousCredentials {
		return
	}

	region := req.ClientInfo.SigningRegion
	if region == "" {
		region = aws.StringValue(req.Config.Region)
	}

	name := req.ClientInfo.SigningName
	if name == "" {
		name = req.ClientInfo.ServiceName
	}

	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
		v4.Debug = req.Config.LogLevel.Value()
		v4.Logger = req.Config.Logger
		v4.DisableHeaderHoisting = req.NotHoist
		v4.currentTimeFn = curTimeFn
	})

	// Reuse the time of the last signing (if any) so a retry re-signs with
	// a consistent timestamp.
	signingTime := req.Time
	if !req.LastSignedAt.IsZero() {
		signingTime = req.LastSignedAt
	}

	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
	if err != nil {
		req.Error = err
		req.SignedHeaderVals = nil
		return
	}

	req.SignedHeaderVals = signedHeaders
	req.LastSignedAt = curTimeFn()
}
|
||||||
|
|
||||||
|
// logSignInfoMsg is the debug template emitted when LogDebugWithSigning is
// enabled. It is filled with the canonical string, the string to sign, and
// (for presigned requests) the rendered logSignedURLMsg.
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`

// logSignedURLMsg is appended to logSignInfoMsg for presigned requests only.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
|
||||||
|
|
||||||
|
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
||||||
|
signedURLMsg := ""
|
||||||
|
if ctx.isPresign {
|
||||||
|
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
|
||||||
|
}
|
||||||
|
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
|
||||||
|
v4.Logger.Log(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// build assembles every component of the V4 signature in dependency order:
// timestamps and credential scope first, then (presign only) header
// hoisting into the query, then body digest, canonical headers, canonical
// string, string-to-sign, and finally the signature, which is attached to
// either the query string (presign) or the Authorization header.
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
	ctx.buildTime()             // no depends
	ctx.buildCredentialString() // no depends

	unsignedHeaders := ctx.Request.Header
	if ctx.isPresign {
		if !disableHeaderHoisting {
			// Move hoistable headers into the query string so presigned
			// URL consumers do not have to reproduce them as headers.
			urlValues := url.Values{}
			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
			for k := range urlValues {
				ctx.Query[k] = urlValues[k]
			}
		}
	}

	ctx.buildBodyDigest()
	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
	ctx.buildCanonicalString() // depends on canon headers / signed headers
	ctx.buildStringToSign()    // depends on canon string
	ctx.buildSignature()       // depends on string to sign

	if ctx.isPresign {
		ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
	} else {
		parts := []string{
			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
			"SignedHeaders=" + ctx.signedHeaders,
			"Signature=" + ctx.signature,
		}
		ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
	}
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildTime() {
|
||||||
|
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
|
||||||
|
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
duration := int64(ctx.ExpireTime / time.Second)
|
||||||
|
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
|
||||||
|
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||||
|
} else {
|
||||||
|
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildCredentialString() {
|
||||||
|
ctx.credentialString = strings.Join([]string{
|
||||||
|
ctx.formattedShortTime,
|
||||||
|
ctx.Region,
|
||||||
|
ctx.ServiceName,
|
||||||
|
"aws4_request",
|
||||||
|
}, "/")
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
||||||
|
query := url.Values{}
|
||||||
|
unsignedHeaders := http.Header{}
|
||||||
|
for k, h := range header {
|
||||||
|
if r.IsValid(k) {
|
||||||
|
query[k] = h
|
||||||
|
} else {
|
||||||
|
unsignedHeaders[k] = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return query, unsignedHeaders
|
||||||
|
}
|
||||||
|
// buildCanonicalHeaders collects the headers that participate in the
// signature (those the rule does not reject, plus "host"), records their
// values in ctx.SignedHeaderVals keyed by lowercase name, and produces the
// sorted "k:v" canonical header block and the ";"-joined signed-header list.
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
	var headers []string
	// "host" is always signed even though it is not in the header map.
	headers = append(headers, "host")
	for k, v := range header {
		canonicalKey := http.CanonicalHeaderKey(k)
		if !r.IsValid(canonicalKey) {
			continue // ignored header
		}
		if ctx.SignedHeaderVals == nil {
			ctx.SignedHeaderVals = make(http.Header)
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
			// include additional values
			// (two map keys can fold to the same lowercase name)
			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
			continue
		}

		headers = append(headers, lowerCaseKey)
		ctx.SignedHeaderVals[lowerCaseKey] = v
	}
	// Canonical form requires the header names in sorted order.
	sort.Strings(headers)

	ctx.signedHeaders = strings.Join(headers, ";")

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
	}

	headerValues := make([]string, len(headers))
	for i, k := range headers {
		if k == "host" {
			headerValues[i] = "host:" + ctx.Request.URL.Host
		} else {
			headerValues[i] = k + ":" +
				strings.Join(ctx.SignedHeaderVals[k], ",")
		}
	}

	// Sequential spaces inside values must be collapsed before signing.
	ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
}
|
||||||
|
|
||||||
|
// buildCanonicalString produces the V4 canonical request:
// method, URI, query, canonical headers, signed header list, body digest —
// newline joined. It also rewrites RawQuery so "+" is percent-encoded as
// required by the signing spec.
func (ctx *signingCtx) buildCanonicalString() {
	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
	uri := ctx.Request.URL.Opaque
	if uri != "" {
		// Opaque URLs look like "//host/path..."; drop the first three
		// segments to recover just the path.
		uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
	} else {
		uri = ctx.Request.URL.Path
	}
	if uri == "" {
		uri = "/"
	}

	// S3 object keys must be signed with their path unescaped; every other
	// service signs the escaped path.
	if ctx.ServiceName != "s3" {
		uri = rest.EscapePath(uri, false)
	}

	ctx.canonicalString = strings.Join([]string{
		ctx.Request.Method,
		uri,
		ctx.Request.URL.RawQuery,
		ctx.canonicalHeaders + "\n",
		ctx.signedHeaders,
		ctx.bodyDigest,
	}, "\n")
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildStringToSign() {
|
||||||
|
ctx.stringToSign = strings.Join([]string{
|
||||||
|
authHeaderPrefix,
|
||||||
|
ctx.formattedTime,
|
||||||
|
ctx.credentialString,
|
||||||
|
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildSignature() {
|
||||||
|
secret := ctx.credValues.SecretAccessKey
|
||||||
|
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
|
||||||
|
region := makeHmac(date, []byte(ctx.Region))
|
||||||
|
service := makeHmac(region, []byte(ctx.ServiceName))
|
||||||
|
credentials := makeHmac(service, []byte("aws4_request"))
|
||||||
|
signature := makeHmac(credentials, []byte(ctx.stringToSign))
|
||||||
|
ctx.signature = hex.EncodeToString(signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildBodyDigest chooses the payload hash for the signature: an explicit
// X-Amz-Content-Sha256 header wins; S3 presigned URLs use the literal
// "UNSIGNED-PAYLOAD"; a nil body uses the SHA-256 of the empty string;
// otherwise the body is hashed. S3 and Glacier additionally require the
// digest to be sent as a header.
func (ctx *signingCtx) buildBodyDigest() {
	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
	if hash == "" {
		if ctx.isPresign && ctx.ServiceName == "s3" {
			hash = "UNSIGNED-PAYLOAD"
		} else if ctx.Body == nil {
			hash = emptyStringSHA256
		} else {
			hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
		}
		if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
		}
	}
	ctx.bodyDigest = hash
}
|
||||||
|
|
||||||
|
// isRequestSigned returns if the request is currently signed or presigned
|
||||||
|
func (ctx *signingCtx) isRequestSigned() bool {
|
||||||
|
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if ctx.Request.Header.Get("Authorization") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsign removes signing flags for both signed and presigned requests.
|
||||||
|
func (ctx *signingCtx) removePresign() {
|
||||||
|
ctx.Query.Del("X-Amz-Algorithm")
|
||||||
|
ctx.Query.Del("X-Amz-Signature")
|
||||||
|
ctx.Query.Del("X-Amz-Security-Token")
|
||||||
|
ctx.Query.Del("X-Amz-Date")
|
||||||
|
ctx.Query.Del("X-Amz-Expires")
|
||||||
|
ctx.Query.Del("X-Amz-Credential")
|
||||||
|
ctx.Query.Del("X-Amz-SignedHeaders")
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeHmac returns the HMAC-SHA256 of data under key.
func makeHmac(key []byte, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}
|
||||||
|
|
||||||
|
// makeSha256 returns the SHA-256 digest of data.
func makeSha256(data []byte) []byte {
	sum := sha256.Sum256(data)
	return sum[:]
}
|
||||||
|
|
||||||
|
// makeSha256Reader hashes the remaining content of reader with SHA-256 and
// restores the reader to its starting position afterwards.
// NOTE(review): Seek and io.Copy errors are silently ignored here; a failing
// seek would digest the wrong byte range — confirm callers only pass
// well-behaved seekers.
func makeSha256Reader(reader io.ReadSeeker) []byte {
	hash := sha256.New()
	start, _ := reader.Seek(0, 1) // whence 1 == current: remember position
	defer reader.Seek(start, 0)   // whence 0 == start: rewind when done

	io.Copy(hash, reader)
	return hash.Sum(nil)
}
|
||||||
|
|
||||||
|
// doubleSpaces is the marker used to detect runs of adjacent spaces.
const doubleSpaces = "  "

// doubleSpaceBytes is the byte form of doubleSpaces for []byte searches.
var doubleSpaceBytes = []byte(doubleSpaces)

// stripExcessSpaces trims each header value and collapses every run of
// adjacent spaces into a single space, as required for the V4 canonical
// header block. Values without adjacent spaces are returned trimmed and
// otherwise untouched.
//
// Bug fix: the previous in-place compaction recorded the resume index in
// pre-compaction coordinates (stripToIdx = j), so a value containing three
// or more consecutive interior spaces (e.g. "a   b") sliced past the
// shortened buffer's length and panicked with "slice bounds out of range".
// This single-pass rewrite produces the same trimmed/collapsed output for
// all inputs without the panic.
func stripExcessSpaces(headerVals []string) []string {
	vals := make([]string, len(headerVals))
	for i, str := range headerVals {
		// Trim leading and trailing whitespace first.
		trimmed := bytes.TrimSpace([]byte(str))

		if bytes.Index(trimmed, doubleSpaceBytes) < 0 {
			// Fast path: nothing to collapse.
			vals[i] = string(trimmed)
			continue
		}

		// Collapse in place: out shares trimmed's backing array and its
		// write index never passes the read index.
		out := trimmed[:0]
		prevSpace := false
		for _, c := range trimmed {
			if c == ' ' && prevSpace {
				continue
			}
			prevSpace = c == ' '
			out = append(out, c)
		}
		vals[i] = string(out)
	}
	return vals
}
|
106
vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
Normal file
106
vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
Normal file
|
@ -0,0 +1,106 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
	return ReaderSeekerCloser{r}
}

// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
// io.Closer interfaces to the underlying object if they are available.
type ReaderSeekerCloser struct {
	r io.Reader
}

// Read reads from the wrapped reader up to len(p) bytes, returning the byte
// count and any error. A nil underlying reader yields (0, nil).
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, nil
	}
	return r.r.Read(p)
}

// Seek delegates to the wrapped object's io.Seeker if implemented; whence
// follows the io package convention (0 start, 1 current, 2 end). If the
// wrapped object is not a Seeker nothing is done and (0, nil) is returned.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
	if s, ok := r.r.(io.Seeker); ok {
		return s.Seek(offset, whence)
	}
	return 0, nil
}

// Close delegates to the wrapped object's io.Closer if implemented;
// otherwise it is a no-op returning nil.
func (r ReaderSeekerCloser) Close() error {
	if c, ok := r.r.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
|
||||||
|
|
||||||
|
// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface
// Can be used with the s3manager.Downloader to download content to a buffer
// in memory. Safe to use concurrently.
type WriteAtBuffer struct {
	buf []byte
	m   sync.Mutex

	// GrowthCoeff defines the growth rate of the internal buffer. By
	// default, the growth rate is 1, where expanding the internal
	// buffer will allocate only enough capacity to fit the new expected
	// length.
	GrowthCoeff float64
}

// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
// provided by buf.
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
	return &WriteAtBuffer{buf: buf}
}

// WriteAt writes a slice of bytes to a buffer starting at the position provided
// The number of bytes written will be returned, or error. Can overwrite previous
// written slices if the write ats overlap.
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
	b.m.Lock()
	defer b.m.Unlock()

	end := pos + int64(len(p))
	if int64(len(b.buf)) < end {
		if int64(cap(b.buf)) < end {
			// Grow the backing array; GrowthCoeff < 1 is normalized to 1
			// so capacity never falls short of the required length.
			if b.GrowthCoeff < 1 {
				b.GrowthCoeff = 1
			}
			grown := make([]byte, end, int64(b.GrowthCoeff*float64(end)))
			copy(grown, b.buf)
			b.buf = grown
		}
		b.buf = b.buf[:end]
	}
	copy(b.buf[pos:], p)
	return len(p), nil
}

// Bytes returns a slice of bytes written to the buffer.
func (b *WriteAtBuffer) Bytes() []byte {
	b.m.Lock()
	defer b.m.Unlock()
	// Full slice expression caps the result so callers cannot append into
	// the buffer's unused capacity.
	n := len(b.buf)
	return b.buf[:n:n]
}
|
8
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
Normal file
8
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
// Package aws provides core functionality for making requests to AWS services.
|
||||||
|
package aws
|
||||||
|
|
||||||
|
// SDKName is the name of this AWS SDK.
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK.
const SDKVersion = "1.4.14"
|
70
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
generated
vendored
Normal file
70
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
// Package endpoints validates regional endpoints for services.
|
||||||
|
package endpoints
|
||||||
|
|
||||||
|
//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
|
||||||
|
//go:generate gofmt -s -w endpoints_map.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NormalizeEndpoint takes and endpoint and service API information to return a
|
||||||
|
// normalized endpoint and signing region. If the endpoint is not an empty string
|
||||||
|
// the service name and region will be used to look up the service's API endpoint.
|
||||||
|
// If the endpoint is provided the scheme will be added if it is not present.
|
||||||
|
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
|
||||||
|
if endpoint == "" {
|
||||||
|
return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
|
||||||
|
}
|
||||||
|
|
||||||
|
return AddScheme(endpoint, disableSSL), ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// if the service and region pair are not found endpoint and signingRegion will be empty.
// Lookup keys are tried most-specific first: region/service, region/*,
// */service, then */* (each with a "/dualstack" suffix when requested).
func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
	dualStackField := ""
	if useDualStack {
		dualStackField = "/dualstack"
	}

	derivedKeys := []string{
		region + "/" + svcName + dualStackField,
		region + "/*" + dualStackField,
		"*/" + svcName + dualStackField,
		"*/*" + dualStackField,
	}

	for _, key := range derivedKeys {
		if val, ok := endpointsMap.Endpoints[key]; ok {
			ep := val.Endpoint
			// Expand the template placeholders from the lookup inputs.
			ep = strings.Replace(ep, "{region}", region, -1)
			ep = strings.Replace(ep, "{service}", svcName, -1)

			endpoint = ep
			signingRegion = val.SigningRegion
			break
		}
	}

	return AddScheme(endpoint, disableSSL), signingRegion
}
|
||||||
|
|
||||||
|
// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")

// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
	if endpoint == "" || schemeRE.MatchString(endpoint) {
		// Nothing to do for empty endpoints or ones that already carry
		// a scheme.
		return endpoint
	}
	scheme := "https"
	if disableSSL {
		scheme = "http"
	}
	return fmt.Sprintf("%s://%s", scheme, endpoint)
}
|
78
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
generated
vendored
Normal file
78
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
|
||||||
|
{
|
||||||
|
"version": 2,
|
||||||
|
"endpoints": {
|
||||||
|
"*/*": {
|
||||||
|
"endpoint": "{service}.{region}.amazonaws.com"
|
||||||
|
},
|
||||||
|
"cn-north-1/*": {
|
||||||
|
"endpoint": "{service}.{region}.amazonaws.com.cn",
|
||||||
|
"signatureVersion": "v4"
|
||||||
|
},
|
||||||
|
"cn-north-1/ec2metadata": {
|
||||||
|
"endpoint": "http://169.254.169.254/latest"
|
||||||
|
},
|
||||||
|
"us-gov-west-1/iam": {
|
||||||
|
"endpoint": "iam.us-gov.amazonaws.com"
|
||||||
|
},
|
||||||
|
"us-gov-west-1/sts": {
|
||||||
|
"endpoint": "sts.us-gov-west-1.amazonaws.com"
|
||||||
|
},
|
||||||
|
"us-gov-west-1/s3": {
|
||||||
|
"endpoint": "s3-{region}.amazonaws.com"
|
||||||
|
},
|
||||||
|
"us-gov-west-1/ec2metadata": {
|
||||||
|
"endpoint": "http://169.254.169.254/latest"
|
||||||
|
},
|
||||||
|
"*/cloudfront": {
|
||||||
|
"endpoint": "cloudfront.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/cloudsearchdomain": {
|
||||||
|
"endpoint": "",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/data.iot": {
|
||||||
|
"endpoint": "",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/ec2metadata": {
|
||||||
|
"endpoint": "http://169.254.169.254/latest"
|
||||||
|
},
|
||||||
|
"*/iam": {
|
||||||
|
"endpoint": "iam.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/importexport": {
|
||||||
|
"endpoint": "importexport.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/route53": {
|
||||||
|
"endpoint": "route53.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/sts": {
|
||||||
|
"endpoint": "sts.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/waf": {
|
||||||
|
"endpoint": "waf.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"us-east-1/sdb": {
|
||||||
|
"endpoint": "sdb.amazonaws.com",
|
||||||
|
"signingRegion": "us-east-1"
|
||||||
|
},
|
||||||
|
"*/s3": {
|
||||||
|
"endpoint": "s3-{region}.amazonaws.com"
|
||||||
|
},
|
||||||
|
"*/s3/dualstack": {
|
||||||
|
"endpoint": "s3.dualstack.{region}.amazonaws.com"
|
||||||
|
},
|
||||||
|
"us-east-1/s3": {
|
||||||
|
"endpoint": "s3.amazonaws.com"
|
||||||
|
},
|
||||||
|
"eu-central-1/s3": {
|
||||||
|
"endpoint": "{service}.{region}.amazonaws.com"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
91
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
generated
vendored
Normal file
91
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
package endpoints

// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

// endpointStruct is the generated lookup table schema.
type endpointStruct struct {
	Version   int
	Endpoints map[string]endpointEntry
}

// endpointEntry holds an endpoint template ({region}/{service} are expanded
// at lookup time) and an optional fixed signing region.
type endpointEntry struct {
	Endpoint      string
	SigningRegion string
}

// endpointsMap is keyed by "region/service" with "*" wildcards and an
// optional "/dualstack" suffix; see EndpointForRegion for lookup precedence.
var endpointsMap = endpointStruct{
	Version: 2,
	Endpoints: map[string]endpointEntry{
		"*/*": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"*/cloudfront": {
			Endpoint:      "cloudfront.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/cloudsearchdomain": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/data.iot": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"*/iam": {
			Endpoint:      "iam.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/importexport": {
			Endpoint:      "importexport.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/route53": {
			Endpoint:      "route53.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"*/s3/dualstack": {
			Endpoint: "s3.dualstack.{region}.amazonaws.com",
		},
		"*/sts": {
			Endpoint:      "sts.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/waf": {
			Endpoint:      "waf.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"cn-north-1/*": {
			Endpoint: "{service}.{region}.amazonaws.com.cn",
		},
		"cn-north-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"eu-central-1/s3": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"us-east-1/s3": {
			Endpoint: "s3.amazonaws.com",
		},
		"us-east-1/sdb": {
			Endpoint:      "sdb.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"us-gov-west-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"us-gov-west-1/iam": {
			Endpoint: "iam.us-gov.amazonaws.com",
		},
		"us-gov-west-1/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"us-gov-west-1/sts": {
			Endpoint: "sts.us-gov-west-1.amazonaws.com",
		},
	},
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue