forked from TrueCloudLab/restic

Vendor dependencies with dep

parent df8a5792f1
commit 91edebf1fe

1691 changed files with 466360 additions and 0 deletions
vendor/github.com/elithrar/simple-scrypt/.gitignore (generated, vendored, normal file, 26 lines)
@@ -0,0 +1,26 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

*.DS_Store
vendor/github.com/elithrar/simple-scrypt/.travis.yml (generated, vendored, normal file, 15 lines)
@@ -0,0 +1,15 @@
language: go
sudo: false
go:
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - tip
install:
  - go get golang.org/x/tools/cmd/vet
script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d -s .)
  - go tool vet .
  - go test -v ./...
vendor/github.com/elithrar/simple-scrypt/LICENSE (generated, vendored, normal file, 22 lines)
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Matthew Silverlock (matt@eatsleeprepeat.net)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/elithrar/simple-scrypt/README.md (generated, vendored, normal file, 110 lines)
@@ -0,0 +1,110 @@
# simple-scrypt
[GoDoc](https://godoc.org/github.com/elithrar/simple-scrypt) [Build Status](https://travis-ci.org/elithrar/simple-scrypt)

simple-scrypt provides a convenience wrapper around Go's existing
[scrypt](http://golang.org/x/crypto/scrypt) package that makes it easier to
securely derive strong keys ("hash user passwords"). This library allows you to:

* Generate a scrypt derived key with a cryptographically secure salt and sane
  default parameters for N, r and p.
* Upgrade the parameters used to generate keys as hardware improves by storing
  them with the derived key (the scrypt spec doesn't allow for this by
  default).
* Provide your own parameters (if you wish to).

The API closely mirrors Go's [bcrypt](https://golang.org/x/crypto/bcrypt)
library in an effort to make it easy to migrate, and because it's an
easy-to-grok API.

## Example

simple-scrypt doesn't try to re-invent the wheel or do anything "special". It
wraps the `scrypt.Key` function as thinly as possible, generates a
cryptographically secure salt for you using Go's `crypto/rand` package, and
returns the derived key with the parameters prepended:

```go
package main

import (
	"fmt"
	"log"

	"github.com/elithrar/simple-scrypt"
)

func main() {
	// e.g. r.PostFormValue("password")
	passwordFromForm := "prew8fid9hick6c"

	// Generates a derived key of the form "N$r$p$salt$dk" where N, r and p are defined as per
	// Colin Percival's scrypt paper: http://www.tarsnap.com/scrypt/scrypt.pdf
	// scrypt.DefaultParams (N=16384, r=8, p=1) makes it easy to provide these parameters, and
	// (should you wish) you can provide your own values via the scrypt.Params type.
	hash, err := scrypt.GenerateFromPassword([]byte(passwordFromForm), scrypt.DefaultParams)
	if err != nil {
		log.Fatal(err)
	}

	// Print the derived key with its parameters prepended.
	fmt.Printf("%s\n", hash)

	// Uses the parameters from the existing derived key. Returns an error if they don't match.
	err = scrypt.CompareHashAndPassword(hash, []byte(passwordFromForm))
	if err != nil {
		log.Fatal(err)
	}
}
```

## Upgrading Parameters

Upgrading derived keys from a set of parameters to a "stronger" set of parameters
as hardware improves, or as you scale (and move your auth process to separate
hardware), can be pretty useful. Here's how to do it with simple-scrypt:

```go
func main() {
	// SCENE: We've successfully authenticated a user, compared their submitted
	// (cleartext) password against the derived key stored in our database, and
	// now want to upgrade the parameters (more rounds, more parallelism) to
	// reflect some shiny new hardware we just purchased. As the user is logging
	// in, we can retrieve the parameters used to generate their key, and if
	// they don't match our "new" parameters, we can re-generate the key while
	// we still have the cleartext password in memory
	// (e.g. before the HTTP request ends).
	current, err := scrypt.Cost(hash)
	if err != nil {
		log.Fatal(err)
	}

	// Now check them against our own Params struct (e.g. using reflect.DeepEqual)
	// and determine whether we want to generate a new key with our "upgraded" parameters.
	slower := scrypt.Params{
		N:       32768,
		R:       8,
		P:       2,
		SaltLen: 16,
		DKLen:   32,
	}

	if !reflect.DeepEqual(current, slower) {
		// Re-generate the key with the slower parameters
		// here using scrypt.GenerateFromPassword
	}
}
```

## TO-DO:

The following features are planned. PRs are welcome.

- [x] Tag a release build.
- [x] Automatically calculate "optimal" values for N, r, p, similar to
  [the Ruby scrypt library](https://github.com/pbhogan/scrypt/blob/master/lib/scrypt.rb#L97-L146),
  e.g. `func Calibrate(duration int, mem int, fallback Params) (Params, error)`;
  contributed thanks to @tgulacsi.

## License

MIT Licensed. See LICENSE file for details.
vendor/github.com/elithrar/simple-scrypt/scrypt.go (generated, vendored, normal file, 295 lines)
@@ -0,0 +1,295 @@
// Package scrypt provides a convenience wrapper around Go's existing scrypt package
// that makes it easier to securely derive strong keys from weak
// inputs (i.e. user passwords).
// The package provides password generation, constant-time comparison and
// parameter upgrading for scrypt derived keys.
package scrypt

import (
	"crypto/rand"
	"crypto/subtle"
	"encoding/hex"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"

	"golang.org/x/crypto/scrypt"
)

// Constants
const (
	maxInt     = 1<<31 - 1
	minDKLen   = 16 // the minimum derived key length in bytes.
	minSaltLen = 8  // the minimum allowed salt length in bytes.
)

// Params describes the input parameters to the scrypt
// key derivation function as per Colin Percival's scrypt
// paper: http://www.tarsnap.com/scrypt/scrypt.pdf
type Params struct {
	N       int // CPU/memory cost parameter (logN)
	R       int // block size parameter (octets)
	P       int // parallelisation parameter (positive int)
	SaltLen int // bytes to use as salt (octets)
	DKLen   int // length of the derived key (octets)
}

// DefaultParams provides sensible default inputs into the scrypt function
// for interactive use (i.e. web applications).
// These defaults will consume approximately 16MB of memory (128 * r * N).
// The default key length is 256 bits.
var DefaultParams = Params{N: 16384, R: 8, P: 1, SaltLen: 16, DKLen: 32}

// ErrInvalidHash is returned when failing to parse a provided scrypt
// hash and/or parameters.
var ErrInvalidHash = errors.New("scrypt: the provided hash is not in the correct format")

// ErrInvalidParams is returned when the cost parameters (N, r, p), salt length
// or derived key length are invalid.
var ErrInvalidParams = errors.New("scrypt: the parameters provided are invalid")

// ErrMismatchedHashAndPassword is returned when a password (hashed) and
// given hash do not match.
var ErrMismatchedHashAndPassword = errors.New("scrypt: the hashed password does not match the hash of the given password")

// GenerateRandomBytes returns securely generated random bytes.
// It will return an error if the system's secure random
// number generator fails to function correctly, in which
// case the caller should not continue.
func GenerateRandomBytes(n int) ([]byte, error) {
	b := make([]byte, n)
	_, err := rand.Read(b)
	// err == nil only if len(b) == n
	if err != nil {
		return nil, err
	}

	return b, nil
}

// GenerateFromPassword returns the derived key of the password using the
// parameters provided. The parameters are prepended to the derived key and
// separated by the "$" character (0x24).
// If the parameters provided are less than the minimum acceptable values,
// an error will be returned.
func GenerateFromPassword(password []byte, params Params) ([]byte, error) {
	salt, err := GenerateRandomBytes(params.SaltLen)
	if err != nil {
		return nil, err
	}

	if err := params.Check(); err != nil {
		return nil, err
	}

	// scrypt.Key returns the raw scrypt derived key.
	dk, err := scrypt.Key(password, salt, params.N, params.R, params.P, params.DKLen)
	if err != nil {
		return nil, err
	}

	// Prepend the params and the salt to the derived key, each separated
	// by a "$" character. The salt and the derived key are hex encoded.
	return []byte(fmt.Sprintf("%d$%d$%d$%x$%x", params.N, params.R, params.P, salt, dk)), nil
}

// CompareHashAndPassword compares a derived key with the possible cleartext
// equivalent. The parameters used in the provided derived key are used.
// The comparison performed by this function is constant-time. It returns nil
// on success, and an error if the derived keys do not match.
func CompareHashAndPassword(hash []byte, password []byte) error {
	// Decode existing hash, retrieve params and salt.
	params, salt, dk, err := decodeHash(hash)
	if err != nil {
		return err
	}

	// scrypt the cleartext password with the same parameters and salt
	other, err := scrypt.Key(password, salt, params.N, params.R, params.P, params.DKLen)
	if err != nil {
		return err
	}

	// Constant time comparison
	if subtle.ConstantTimeCompare(dk, other) == 1 {
		return nil
	}

	return ErrMismatchedHashAndPassword
}

// Check checks that the parameters are valid for input into the
// scrypt key derivation function.
func (p *Params) Check() error {
	// Validate N
	if p.N > maxInt || p.N <= 1 || p.N%2 != 0 {
		return ErrInvalidParams
	}

	// Validate r
	if p.R < 1 || p.R > maxInt {
		return ErrInvalidParams
	}

	// Validate p
	if p.P < 1 || p.P > maxInt {
		return ErrInvalidParams
	}

	// Validate that r & p don't exceed 2^30 and that N, r, p values don't
	// exceed the limits defined by the scrypt algorithm.
	if uint64(p.R)*uint64(p.P) >= 1<<30 || p.R > maxInt/128/p.P || p.R > maxInt/256 || p.N > maxInt/128/p.R {
		return ErrInvalidParams
	}

	// Validate the salt length
	if p.SaltLen < minSaltLen || p.SaltLen > maxInt {
		return ErrInvalidParams
	}

	// Validate the derived key length
	if p.DKLen < minDKLen || p.DKLen > maxInt {
		return ErrInvalidParams
	}

	return nil
}

// decodeHash extracts the parameters, salt and derived key from the
// provided hash. It returns an error if the hash format is invalid and/or
// the parameters are invalid.
func decodeHash(hash []byte) (Params, []byte, []byte, error) {
	vals := strings.Split(string(hash), "$")

	// N, r, p, salt, scrypt derived key
	if len(vals) != 5 {
		return Params{}, nil, nil, ErrInvalidHash
	}

	var params Params
	var err error

	params.N, err = strconv.Atoi(vals[0])
	if err != nil {
		return params, nil, nil, ErrInvalidHash
	}

	params.R, err = strconv.Atoi(vals[1])
	if err != nil {
		return params, nil, nil, ErrInvalidHash
	}

	params.P, err = strconv.Atoi(vals[2])
	if err != nil {
		return params, nil, nil, ErrInvalidHash
	}

	salt, err := hex.DecodeString(vals[3])
	if err != nil {
		return params, nil, nil, ErrInvalidHash
	}
	params.SaltLen = len(salt)

	dk, err := hex.DecodeString(vals[4])
	if err != nil {
		return params, nil, nil, ErrInvalidHash
	}
	params.DKLen = len(dk)

	if err := params.Check(); err != nil {
		return params, nil, nil, err
	}

	return params, salt, dk, nil
}

// Cost returns the scrypt parameters used to generate the derived key. This
// allows a package user to increase the cost (in time & resources) used as
// computational performance increases over time.
func Cost(hash []byte) (Params, error) {
	params, _, _, err := decodeHash(hash)

	return params, err
}

// Calibrate returns the hardest parameters (not weaker than the given params)
// allowed by the given limits.
// The returned params will not use more memory than the given memMiBytes (MiB) and
// will not take more time than the given timeout, but more than timeout/2.
//
// The default timeout (when the timeout arg is zero) is 200ms.
// The default memMiBytes (when memMiBytes is zero) is 16MiB.
// The default parameters (when params == Params{}) are DefaultParams.
func Calibrate(timeout time.Duration, memMiBytes int, params Params) (Params, error) {
	p := params
	if p.N == 0 || p.R == 0 || p.P == 0 || p.SaltLen == 0 || p.DKLen == 0 {
		p = DefaultParams
	} else if err := p.Check(); err != nil {
		return p, err
	}
	if timeout == 0 {
		timeout = 200 * time.Millisecond
	}
	if memMiBytes == 0 {
		memMiBytes = 16
	}
	salt, err := GenerateRandomBytes(p.SaltLen)
	if err != nil {
		return p, err
	}
	password := []byte("weakpassword")

	// First, we calculate the minimal required time.
	start := time.Now()
	if _, err := scrypt.Key(password, salt, p.N, p.R, p.P, p.DKLen); err != nil {
		return p, err
	}
	dur := time.Since(start)

	for dur < timeout && p.N < maxInt>>1 {
		// Each doubling of N roughly doubles the runtime, so scale the
		// measured duration along with it.
		dur *= 2
		p.N <<= 1
	}

	// Memory usage is at least 128 * r * N, see
	// http://blog.ircmaxell.com/2014/03/why-i-dont-recommend-scrypt.html
	// or https://drupal.org/comment/4675994#comment-4675994

	var again bool
	memBytes := memMiBytes << 20
	// If we'd use more memory than allowed, we can tune the memory usage
	for 128*p.R*p.N > memBytes {
		if p.R > 1 {
			// by lowering r
			p.R--
		} else if p.N > 16 {
			again = true
			p.N >>= 1
		} else {
			break
		}
	}
	if !again {
		return p, p.Check()
	}

	// We have to compensate for the lowering of N by increasing p.
	for i := 0; i < 10 && p.P > 0; i++ {
		start := time.Now()
		if _, err := scrypt.Key(password, salt, p.N, p.R, p.P, p.DKLen); err != nil {
			return p, err
		}
		dur := time.Since(start)
		if dur < timeout/2 {
			p.P = int(float64(p.P)*float64(timeout/dur) + 1)
		} else if dur > timeout && p.P > 1 {
			p.P--
		} else {
			break
		}
	}

	return p, p.Check()
}
vendor/github.com/elithrar/simple-scrypt/scrypt_test.go (generated, vendored, normal file, 156 lines)
@@ -0,0 +1,156 @@
package scrypt

import (
	"fmt"
	"reflect"
	"testing"
	"time"
)

// Test cases
var (
	testLengths = []int{1, 8, 16, 32, 100, 500, 2500}
	password    = "super-secret-password"
)

var testParams = []struct {
	pass   bool
	params Params
}{
	{true, Params{16384, 8, 1, 32, 64}},
	{true, Params{16384, 8, 1, 16, 32}},
	{true, Params{65536, 8, 1, 16, 64}},
	{true, Params{1048576, 8, 2, 64, 128}},
	{false, Params{-1, 8, 1, 16, 32}},          // invalid N
	{false, Params{0, 8, 1, 16, 32}},           // invalid N
	{false, Params{1 << 31, 8, 1, 16, 32}},     // invalid N
	{false, Params{16384, 0, 12, 16, 32}},      // invalid R
	{false, Params{16384, 8, 0, 16, 32}},       // invalid P
	{false, Params{16384, 1 << 24, 1, 16, 32}}, // invalid R > maxInt/256
	{false, Params{1 << 31, 8, 0, 16, 32}},     // invalid N and P
	{false, Params{4096, 8, 1, 5, 32}},         // invalid SaltLen
	{false, Params{4096, 8, 1, 16, 2}},         // invalid DKLen
}

var testHashes = []struct {
	pass bool
	hash string
}{
	{false, "1$8$1$9003d0e8e69482843e6bd560c2c9cd94$1976f233124e0ee32bb2678eb1b0ed668eb66cff6fa43279d1e33f6e81af893b"}, // N too small
	{false, "$9003d0e8e69482843e6bd560c2c9cd94$1976f233124e0ee32bb2678eb1b0ed668eb66cff6fa43279d1e33f6e81af893b"}, // too short
	{false, "16384#8#1#18fbc325efa37402d27c3c2172900cbf$d4e5e1b9eedc1a6a14aad6624ab57b7b42ae75b9c9845fde32de765835f2aaf9"}, // incorrect separators
	{false, "16384$nogood$1$18fbc325efa37402d27c3c2172900cbf$d4e5e1b9eedc1a6a14aad6624ab57b7b42ae75b9c9845fde32de765835f2aaf9"}, // invalid R
	{false, "16384$8$abc1$18fbc325efa37402d27c3c2172900cbf$d4e5e1b9eedc1a6a14aad6624ab57b7b42ae75b9c9845fde32de765835f2aaf9"}, // invalid P
	{false, "16384$8$1$Tk9QRQ==$d4e5e1b9eedc1a6a14aad6624ab57b7b42ae75b9c9845fde32de765835f2aaf9"}, // invalid salt (not hex)
	{false, "16384$8$1$18fbc325efa37402d27c3c2172900cbf$42ae====/75b9c9845fde32de765835f2aaf9"}, // invalid dk (not hex)
}

func TestGenerateRandomBytes(t *testing.T) {
	for _, v := range testLengths {
		_, err := GenerateRandomBytes(v)
		if err != nil {
			t.Fatalf("failed to generate random bytes")
		}
	}
}

func TestGenerateFromPassword(t *testing.T) {
	for _, v := range testParams {
		_, err := GenerateFromPassword([]byte(password), v.params)
		if err != nil && v.pass == true {
			t.Fatalf("unexpected error for valid params: %+v", v.params)
		}
	}
}

func TestCompareHashAndPassword(t *testing.T) {
	hash, err := GenerateFromPassword([]byte(password), DefaultParams)
	if err != nil {
		t.Fatal(err)
	}

	if err := CompareHashAndPassword(hash, []byte(password)); err != nil {
		t.Fatal(err)
	}

	if err := CompareHashAndPassword(hash, []byte("invalid-password")); err == nil {
		t.Fatalf("mismatched passwords did not produce an error")
	}

	invalidHash := []byte("$166$$11$a2ad56a415af5")
	if err := CompareHashAndPassword(invalidHash, []byte(password)); err == nil {
		t.Fatalf("did not identify an invalid hash")
	}

}

func TestCost(t *testing.T) {
	hash, err := GenerateFromPassword([]byte(password), DefaultParams)
	if err != nil {
		t.Fatal(err)
	}

	params, err := Cost(hash)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(params, DefaultParams) {
		t.Fatal("cost mismatch: parameters used did not match those retrieved")
	}
}

func TestDecodeHash(t *testing.T) {
	for _, v := range testHashes {
		_, err := Cost([]byte(v.hash))
		if err == nil && v.pass == false {
			t.Fatal("invalid hash: did not correctly detect invalid password hash")
		}
	}
}

func TestCalibrate(t *testing.T) {
	timeout := 500 * time.Millisecond
	for testNum, tc := range []struct {
		MemMiB int
	}{
		{64},
		{32},
		{16},
		{8},
		{1},
	} {
		var (
			p   Params
			err error
		)
		p, err = Calibrate(timeout, tc.MemMiB, p)
		if err != nil {
			t.Fatalf("%d. %#v: %v", testNum, p, err)
		}
		if (128*p.R*p.N)>>20 > tc.MemMiB {
			t.Errorf("%d. wanted memory limit %d, got %d.", testNum, tc.MemMiB, (128*p.R*p.N)>>20)
		}
		start := time.Now()
		_, err = GenerateFromPassword([]byte(password), p)
		dur := time.Since(start)
		t.Logf("GenerateFromPassword with %#v took %s (%v)", p, dur, err)
		if err != nil {
			t.Fatalf("%d. GenerateFromPassword with %#v: %v", testNum, p, err)
		}
		if dur < timeout/2 {
			t.Errorf("%d. GenerateFromPassword was too fast (wanted around %s, got %s) with %#v.", testNum, timeout, dur, p)
		} else if timeout*2 < dur {
			t.Errorf("%d. GenerateFromPassword took too long (wanted around %s, got %s) with %#v.", testNum, timeout, dur, p)
		}
	}
}

func ExampleCalibrate() {
	p, err := Calibrate(1*time.Second, 128, Params{})
	if err != nil {
		panic(err)
	}
	dk, err := GenerateFromPassword([]byte("super-secret-password"), p)
	fmt.Printf("generated password is %q (%v)", dk, err)
}
vendor/github.com/go-ini/ini/.gitignore (generated, vendored, normal file, 5 lines)
@@ -0,0 +1,5 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini
.idea
vendor/github.com/go-ini/ini/.travis.yml (generated, vendored, normal file, 14 lines)
@@ -0,0 +1,14 @@
sudo: false
language: go
go:
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - 1.8
  - master

script:
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/smartystreets/goconvey
  - go test -v -cover -race
vendor/github.com/go-ini/ini/LICENSE (generated, vendored, normal file, 191 lines)
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/go-ini/ini/Makefile (generated, vendored, normal file, 12 lines)
@@ -0,0 +1,12 @@
.PHONY: build test bench vet

build: vet bench

test:
	go test -v -cover -race

bench:
	go test -v -cover -race -test.bench=. -test.benchmem

vet:
	go vet
746
vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
746
vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
|
@ -0,0 +1,746 @@
|
|||
INI [](https://travis-ci.org/go-ini/ini) [](https://sourcegraph.com/github.com/go-ini/ini?badge)
|
||||
===
|
||||
|
||||

|
||||
|
||||
Package ini provides INI file read and write functionality in Go.
|
||||
|
||||
[简体中文](README_ZH.md)
|
||||
|
||||
## Feature
|
||||
|
||||
- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
|
||||
- Read with recursion values.
|
||||
- Read with parent-child sections.
|
||||
- Read with auto-increment key names.
|
||||
- Read with multiple-line values.
|
||||
- Read with tons of helper methods.
|
||||
- Read and convert values to Go types.
|
||||
- Read and **WRITE** comments of sections and keys.
|
||||
- Manipulate sections, keys and comments with ease.
|
||||
- Keep sections and keys in order as you parse and save.
|
||||
|
||||
## Installation
|
||||
|
||||
To use a tagged revision:
|
||||
|
||||
go get gopkg.in/ini.v1
|
||||
|
||||
To use with latest changes:
|
||||
|
||||
go get github.com/go-ini/ini
|
||||
|
||||
Please add `-u` flag to update in the future.
|
||||
|
||||
### Testing
|
||||
|
||||
If you want to test on your machine, please apply `-t` flag:
|
||||
|
||||
go get -t gopkg.in/ini.v1
|
||||
|
||||
Please add `-u` flag to update in the future.
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Loading from data sources
|
||||
|
||||
A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
|
||||
|
||||
```go
|
||||
cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
|
||||
```
|
||||
|
||||
Or start with an empty object:
|
||||
|
||||
```go
|
||||
cfg := ini.Empty()
|
||||
```
|
||||
|
||||
When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
|
||||
|
||||
```go
|
||||
err := cfg.Append("other file", []byte("other raw data"))
|
||||
```
|
||||
|
||||
If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error.
|
||||
|
||||
```go
|
||||
cfg, err := ini.LooseLoad("filename", "filename_404")
|
||||
```
|
||||
|
||||
The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual.
|
||||
|
||||
#### Ignore cases of key name
|
||||
|
||||
When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
|
||||
|
||||
```go
|
||||
cfg, err := ini.InsensitiveLoad("filename")
|
||||
//...
|
||||
|
||||
// sec1 and sec2 are the exactly same section object
|
||||
sec1, err := cfg.GetSection("Section")
|
||||
sec2, err := cfg.GetSection("SecTIOn")
|
||||
|
||||
// key1 and key2 are the exactly same key object
|
||||
key1, err := sec1.GetKey("Key")
|
||||
key2, err := sec2.GetKey("KeY")
|
||||
```
|
||||
|
||||
#### MySQL-like boolean key
|
||||
|
||||
MySQL's configuration allows a key without value as follows:
|
||||
|
||||
```ini
|
||||
[mysqld]
|
||||
...
|
||||
skip-host-cache
|
||||
skip-name-resolve
|
||||
```
|
||||
|
||||
By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options:
|
||||
|
||||
```go
|
||||
cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
|
||||
```
|
||||
|
||||
The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read.
|
||||
|
||||
To generate such keys in your program, you could use `NewBooleanKey`:
|
||||
|
||||
```go
|
||||
key, err := sec.NewBooleanKey("skip-host-cache")
|
||||
```
|
||||
|
||||
#### Comment
|
||||
|
||||
Take care that following format will be treated as comment:
|
||||
|
||||
1. Line begins with `#` or `;`
|
||||
2. Words after `#` or `;`
|
||||
3. Words after section name (i.e words after `[some section name]`)
|
||||
|
||||
If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.
|
||||
|
||||
Alternatively, you can use following `LoadOptions` to completely ignore inline comments:
|
||||
|
||||
```go
|
||||
cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini"))
|
||||
```
|
||||
|
||||
### Working with sections
|
||||
|
||||
To get a section, you would need to:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("section name")
|
||||
```
|
||||
|
||||
For a shortcut for default section, just give an empty string as name:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("")
|
||||
```
|
||||
|
||||
When you're pretty sure the section exists, following code could make your life easier:
|
||||
|
||||
```go
|
||||
section := cfg.Section("section name")
|
||||
```
|
||||
|
||||
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
|
||||
|
||||
To create a new section:
|
||||
|
||||
```go
|
||||
err := cfg.NewSection("new section")
|
||||
```
|
||||
|
||||
To get a list of sections or section names:
|
||||
|
||||
```go
|
||||
sections := cfg.Sections()
|
||||
names := cfg.SectionStrings()
|
||||
```
|
||||
|
||||
### Working with keys
|
||||
|
||||
To get a key under a section:
|
||||
|
||||
```go
|
||||
key, err := cfg.Section("").GetKey("key name")
|
||||
```
|
||||
|
||||
Same rule applies to key operations:
|
||||
|
||||
```go
|
||||
key := cfg.Section("").Key("key name")
|
||||
```
|
||||
|
||||
To check if a key exists:
|
||||
|
||||
```go
|
||||
yes := cfg.Section("").HasKey("key name")
|
||||
```
|
||||
|
||||
To create a new key:
|
||||
|
||||
```go
|
||||
err := cfg.Section("").NewKey("name", "value")
|
||||
```
|
||||
|
||||
To get a list of keys or key names:
|
||||
|
||||
```go
|
||||
keys := cfg.Section("").Keys()
|
||||
names := cfg.Section("").KeyStrings()
|
||||
```
|
||||
|
||||
To get a clone hash of keys and corresponding values:
|
||||
|
||||
```go
|
||||
hash := cfg.Section("").KeysHash()
|
||||
```
|
||||
|
||||
### Working with values
|
||||
|
||||
To get a string value:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").String()
|
||||
```
|
||||
|
||||
To validate key value on the fly:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").Validate(func(in string) string {
|
||||
if len(in) == 0 {
|
||||
return "default"
|
||||
}
|
||||
return in
|
||||
})
|
||||
```
|
||||
|
||||
If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance):
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").Value()
|
||||
```
|
||||
|
||||
To check if raw value exists:
|
||||
|
||||
```go
|
||||
yes := cfg.Section("").HasValue("test value")
|
||||
```
|
||||
|
||||
To get value with types:
|
||||
|
||||
```go
|
||||
// For boolean values:
|
||||
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
|
||||
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
|
||||
v, err = cfg.Section("").Key("BOOL").Bool()
|
||||
v, err = cfg.Section("").Key("FLOAT64").Float64()
|
||||
v, err = cfg.Section("").Key("INT").Int()
|
||||
v, err = cfg.Section("").Key("INT64").Int64()
|
||||
v, err = cfg.Section("").Key("UINT").Uint()
|
||||
v, err = cfg.Section("").Key("UINT64").Uint64()
|
||||
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
|
||||
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
|
||||
|
||||
v = cfg.Section("").Key("BOOL").MustBool()
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64()
|
||||
v = cfg.Section("").Key("INT").MustInt()
|
||||
v = cfg.Section("").Key("INT64").MustInt64()
|
||||
v = cfg.Section("").Key("UINT").MustUint()
|
||||
v = cfg.Section("").Key("UINT64").MustUint64()
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
|
||||
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
|
||||
|
||||
// Methods start with Must also accept one argument for default value
|
||||
// when key not found or fail to parse value to given type.
|
||||
// Except method MustString, which you have to pass a default value.
|
||||
|
||||
v = cfg.Section("").Key("String").MustString("default")
|
||||
v = cfg.Section("").Key("BOOL").MustBool(true)
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
|
||||
v = cfg.Section("").Key("INT").MustInt(10)
|
||||
v = cfg.Section("").Key("INT64").MustInt64(99)
|
||||
v = cfg.Section("").Key("UINT").MustUint(3)
|
||||
v = cfg.Section("").Key("UINT64").MustUint64(6)
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
|
||||
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
|
||||
```
|
||||
|
||||
What if my value is three-line long?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
ADDRESS = """404 road,
|
||||
NotFound, State, 5000
|
||||
Earth"""
|
||||
```
|
||||
|
||||
Not a problem!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("ADDRESS").String()
|
||||
|
||||
/* --- start ---
|
||||
404 road,
|
||||
NotFound, State, 5000
|
||||
Earth
|
||||
------ end --- */
|
||||
```
|
||||
|
||||
That's cool, how about continuation lines?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
two_lines = how about \
|
||||
continuation lines?
|
||||
lots_of_lines = 1 \
|
||||
2 \
|
||||
3 \
|
||||
4
|
||||
```
|
||||
|
||||
Piece of cake!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
|
||||
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
|
||||
```
|
||||
|
||||
Well, I hate continuation lines, how do I disable that?
|
||||
|
||||
```go
|
||||
cfg, err := ini.LoadSources(ini.LoadOptions{
|
||||
IgnoreContinuation: true,
|
||||
}, "filename")
|
||||
```
|
||||
|
||||
Holy crap!
|
||||
|
||||
Note that single quotes around values will be stripped:
|
||||
|
||||
```ini
|
||||
foo = "some value" // foo: some value
|
||||
bar = 'some value' // bar: some value
|
||||
```
|
||||
|
||||
That's all? Hmm, no.
|
||||
|
||||
#### Helper methods of working with values
|
||||
|
||||
To get value with given candidates:
|
||||
|
||||
```go
|
||||
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
|
||||
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
|
||||
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
|
||||
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
|
||||
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
|
||||
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
|
||||
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
|
||||
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
|
||||
```
|
||||
|
||||
Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates.
|
||||
|
||||
To validate value in a given range:
|
||||
|
||||
```go
|
||||
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
|
||||
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
|
||||
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
|
||||
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
|
||||
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
|
||||
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
|
||||
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
|
||||
```
|
||||
|
||||
##### Auto-split values into a slice
|
||||
|
||||
To use zero value of type for invalid inputs:
|
||||
|
||||
```go
|
||||
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
|
||||
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
|
||||
vals = cfg.Section("").Key("STRINGS").Strings(",")
|
||||
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
|
||||
vals = cfg.Section("").Key("INTS").Ints(",")
|
||||
vals = cfg.Section("").Key("INT64S").Int64s(",")
|
||||
vals = cfg.Section("").Key("UINTS").Uints(",")
|
||||
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
|
||||
vals = cfg.Section("").Key("TIMES").Times(",")
|
||||
```
|
||||
|
||||
To exclude invalid values out of result slice:
|
||||
|
||||
```go
|
||||
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
|
||||
// Input: how, 2.2, are, you -> [2.2]
|
||||
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
|
||||
vals = cfg.Section("").Key("INTS").ValidInts(",")
|
||||
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
|
||||
vals = cfg.Section("").Key("UINTS").ValidUints(",")
|
||||
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
|
||||
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
|
||||
```
|
||||
|
||||
Or to return nothing but error when have invalid inputs:
|
||||
|
||||
```go
|
||||
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
|
||||
// Input: how, 2.2, are, you -> error
|
||||
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
|
||||
vals = cfg.Section("").Key("INTS").StrictInts(",")
|
||||
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
|
||||
vals = cfg.Section("").Key("UINTS").StrictUints(",")
|
||||
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
|
||||
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
|
||||
```
|
||||
|
||||
### Save your configuration
|
||||
|
||||
Finally, it's time to save your configuration to somewhere.
|
||||
|
||||
A typical way to save configuration is writing it to a file:
|
||||
|
||||
```go
|
||||
// ...
|
||||
err = cfg.SaveTo("my.ini")
|
||||
err = cfg.SaveToIndent("my.ini", "\t")
|
||||
```
|
||||
|
||||
Another way to save is writing to a `io.Writer` interface:
|
||||
|
||||
```go
|
||||
// ...
|
||||
cfg.WriteTo(writer)
|
||||
cfg.WriteToIndent(writer, "\t")
|
||||
```
|
||||
|
||||
By default, spaces are used to align "=" sign between key and values, to disable that:
|
||||
|
||||
```go
|
||||
ini.PrettyFormat = false
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Recursive Values
|
||||
|
||||
For all value of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in same section or default section, and `%(<name>)s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions.
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
|
||||
[author]
|
||||
NAME = Unknwon
|
||||
GITHUB = https://github.com/%(NAME)s
|
||||
|
||||
[package]
|
||||
FULL_NAME = github.com/go-ini/%(NAME)s
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
|
||||
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
|
||||
```
|
||||
|
||||
### Parent-child Sections
|
||||
|
||||
You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section.
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
VERSION = v1
|
||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
||||
|
||||
[package]
|
||||
CLONE_URL = https://%(IMPORT_PATH)s
|
||||
|
||||
[package.sub]
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
|
||||
```
|
||||
|
||||
#### Retrieve parent keys available to a child section
|
||||
|
||||
```go
|
||||
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
|
||||
```
|
||||
|
||||
### Unparseable Sections
|
||||
|
||||
Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`:
|
||||
|
||||
```go
|
||||
cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
|
||||
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
|
||||
|
||||
body := cfg.Section("COMMENTS").Body()
|
||||
|
||||
/* --- start ---
|
||||
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
|
||||
------ end --- */
|
||||
```
|
||||
|
||||
### Auto-increment Key Names
|
||||
|
||||
If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter.
|
||||
|
||||
```ini
|
||||
[features]
|
||||
-: Support read/write comments of keys and sections
|
||||
-: Support auto-increment of key names
|
||||
-: Support load multiple files to overwrite key values
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
|
||||
```
|
||||
|
||||
### Map To Struct
|
||||
|
||||
Want more objective way to play with INI? Cool.
|
||||
|
||||
```ini
|
||||
Name = Unknwon
|
||||
age = 21
|
||||
Male = true
|
||||
Born = 1993-01-01T20:17:05Z
|
||||
|
||||
[Note]
|
||||
Content = Hi is a good man!
|
||||
Cities = HangZhou, Boston
|
||||
```
|
||||
|
||||
```go
|
||||
type Note struct {
|
||||
Content string
|
||||
Cities []string
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int `ini:"age"`
|
||||
Male bool
|
||||
Born time.Time
|
||||
Note
|
||||
Created time.Time `ini:"-"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
cfg, err := ini.Load("path/to/ini")
|
||||
// ...
|
||||
p := new(Person)
|
||||
err = cfg.MapTo(p)
|
||||
// ...
|
||||
|
||||
// Things can be simpler.
|
||||
err = ini.MapTo(p, "path/to/ini")
|
||||
// ...
|
||||
|
||||
// Just map a section? Fine.
|
||||
n := new(Note)
|
||||
err = cfg.Section("Note").MapTo(n)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Can I have default value for field? Absolutely.
|
||||
|
||||
Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type.
|
||||
|
||||
```go
|
||||
// ...
|
||||
p := &Person{
|
||||
Name: "Joe",
|
||||
}
|
||||
// ...
|
||||
```
|
||||
|
||||
It's really cool, but what's the point if you can't give me my file back from struct?
|
||||
|
||||
### Reflect From Struct
|
||||
|
||||
Why not?
|
||||
|
||||
```go
|
||||
type Embeded struct {
|
||||
Dates []time.Time `delim:"|"`
|
||||
Places []string `ini:"places,omitempty"`
|
||||
None []int `ini:",omitempty"`
|
||||
}
|
||||
|
||||
type Author struct {
|
||||
Name string `ini:"NAME"`
|
||||
Male bool
|
||||
Age int
|
||||
GPA float64
|
||||
NeverMind string `ini:"-"`
|
||||
*Embeded
|
||||
}
|
||||
|
||||
func main() {
|
||||
a := &Author{"Unknwon", true, 21, 2.8, "",
|
||||
&Embeded{
|
||||
[]time.Time{time.Now(), time.Now()},
|
||||
[]string{"HangZhou", "Boston"},
|
||||
[]int{},
|
||||
}}
|
||||
cfg := ini.Empty()
|
||||
err = ini.ReflectFrom(cfg, a)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
So, what do I get?
|
||||
|
||||
```ini
|
||||
NAME = Unknwon
|
||||
Male = true
|
||||
Age = 21
|
||||
GPA = 2.8
|
||||
|
||||
[Embeded]
|
||||
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
|
||||
places = HangZhou,Boston
|
||||
```
|
||||
|
||||
#### Name Mapper

To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.

There are 2 built-in name mappers:

- `AllCapsUnderscore`: converts the field name to the `ALL_CAPS_UNDERSCORE` format before matching a section or key.
- `TitleUnderscore`: converts the field name to the `title_underscore` format before matching a section or key.

To use them:

```go
type Info struct {
	PackageName string
}

func main() {
	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
	// ...

	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
	// ...
	info := new(Info)
	cfg.NameMapper = ini.AllCapsUnderscore
	err = cfg.MapTo(info)
	// ...
}
```

The same rules apply to the `ini.ReflectFromWithMapper` function.
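Because `NameMapper` is just a `func(string) string`, you can also supply your own mapping. A hypothetical sketch that matches fields like `PackageName` against kebab-case keys like `package-name` (the kebab-case rule is an illustrative assumption, not a built-in):

```go
cfg.NameMapper = func(raw string) string {
	out := make([]rune, 0, len(raw)+4)
	for i, r := range raw {
		if r >= 'A' && r <= 'Z' {
			if i > 0 {
				out = append(out, '-')
			}
			r += 'a' - 'A' // lowercase the letter
		}
		out = append(out, r)
	}
	return string(out)
}
```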
#### Value Mapper

To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:

```go
type Env struct {
	Foo string `ini:"foo"`
}

func main() {
	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
	cfg.ValueMapper = os.ExpandEnv
	// ...
	env := &Env{}
	err = cfg.Section("env").MapTo(env)
}
```

This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
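`ValueMapper` has the same shape (`func(string) string`), so any transformation works. A hypothetical sketch that only expands values carrying an explicit `env:` prefix (the prefix convention is an assumption for illustration):

```go
cfg.ValueMapper = func(raw string) string {
	// Hypothetical opt-in convention: only "env:NAME" values are expanded.
	if strings.HasPrefix(raw, "env:") {
		return os.Getenv(strings.TrimPrefix(raw, "env:"))
	}
	return raw
}
```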
#### Other Notes On Map/Reflect

Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:

```go
type Child struct {
	Age string
}

type Parent struct {
	Name string
	Child
}

type Config struct {
	City string
	Parent
}
```

Example configuration:

```ini
City = Boston

[Parent]
Name = Unknwon

[Child]
Age = 21
```

What if, yes, I'm paranoid, I want the embedded struct to be in the same section? Well, all roads lead to Rome.
```go
type Child struct {
	Age string
}

type Parent struct {
	Name string
	Child `ini:"Parent"`
}

type Config struct {
	City string
	Parent
}
```

Example configuration:

```ini
City = Boston

[Parent]
Name = Unknwon
Age = 21
```
## Getting Help

- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File An Issue](https://github.com/go-ini/ini/issues/new)

## FAQs

### What does `BlockMode` field do?

By default, the library lets you both read and write values, so it uses a lock to make sure your data is safe. But if you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.

### Why another INI library?

Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.

Making those changes meant breaking the API, so it was safer to keep it in another place and start using `gopkg.in` to version the package. (PS: the import path is shorter, too.)

## License

This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
733
vendor/github.com/go-ini/ini/README_ZH.md
generated
vendored
Normal file
@ -0,0 +1,733 @@
Package ini provides INI file read and write functionality in Go.

## Features

- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites
- Read with recursion values
- Read with parent-child sections
- Read with auto-increment key names
- Read with multiple-line values
- Read with tons of helper methods
- Read and convert values to Go types
- Read and **WRITE** comments of sections and keys
- Manipulate sections, keys and comments with ease
- Keep sections and keys in order as you parse and save

## Installation

To use a tagged revision:

	go get gopkg.in/ini.v1

To use with latest changes:

	go get github.com/go-ini/ini

Please add the `-u` flag to update in the future.

### Testing

If you want to run the tests on your machine, please apply the `-t` flag:

	go get -t gopkg.in/ini.v1

Please add the `-u` flag to update in the future.

## Getting Started

### Loading from data sources

A **Data Source** is either raw data of type `[]byte`, a file path of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing a data source of any other type returns an error directly.

```go
cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
```

Or start with an empty file:

```go
cfg := ini.Empty()
```

When you cannot decide at the beginning which data sources need to be loaded, you can still use **Append()** to load them whenever needed.

```go
err := cfg.Append("other file", []byte("other raw data"))
```

When you want to load a list of files but cannot be sure which of them may not exist, you can ignore the missing ones by calling `LooseLoad` (`Load` would return an error for nonexistent files):

```go
cfg, err := ini.LooseLoad("filename", "filename_404")
```

Even better, when files that did not exist before suddenly show up by the time `Reload` is called again, they are loaded normally.
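A short sketch of that reload behavior (the file names are illustrative):

```go
cfg, err := ini.LooseLoad("base.ini", "override.ini") // override.ini may not exist yet
// ... later, once override.ini has been created on disk:
err = cfg.Reload() // both files are now parsed
```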
#### Ignore cases of key names

Sometimes a mix of lower and upper case in section and key names is very annoying; in that case you can force all section and key names to lower case while reading with `InsensitiveLoad`:

```go
cfg, err := ini.InsensitiveLoad("filename")
//...

// sec1 and sec2 point to the same section object
sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")

// key1 and key2 point to the same key object
key1, err := sec1.GetKey("Key")
key2, err := sec2.GetKey("KeY")
```

#### MySQL-like boolean keys

MySQL configuration files contain boolean-type keys without a concrete value:

```ini
[mysqld]
...
skip-host-cache
skip-name-resolve
```

By default this is treated as a missing value and fails to parse, but it can be handled with an advanced load option:

```go
cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
```

The value of these keys is always `true`, and only the key name is written out when saving to a file.

If you want to generate such a key programmatically, use `NewBooleanKey`:

```go
key, err := sec.NewBooleanKey("skip-host-cache")
```
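Reading such a key back then looks like this (a sketch following the `my.cnf` example above):

```go
on := cfg.Section("mysqld").Key("skip-host-cache").MustBool(false) // true
```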
#### Comments

The following content is treated as a comment:

1. All lines that begin with `#` or `;`
2. Anything after `#` or `;`
3. Text after a section tag (i.e. anything after `[section name]`)

If you want to use a value that contains `#` or `;`, wrap it with ``` ` ``` or ``` """ ```.

In addition, you can completely ignore inline comments via `LoadOptions`:

```go
cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
```
### Working with sections

To get a section:

```go
section, err := cfg.GetSection("section name")
```

If you want the default section, use an empty string as the section name:

```go
section, err := cfg.GetSection("")
```

When you're pretty sure the section exists, the following convenience is available:

```go
section := cfg.Section("section name")
```

What if you guessed wrong and the section doesn't actually exist? No worries: it is created automatically and the corresponding section object is returned to you.

To create a new section:

```go
_, err := cfg.NewSection("new section")
```

To get a list of all section objects or names:

```go
sections := cfg.Sections()
names := cfg.SectionStrings()
```

### Working with keys

To get a key under a section:

```go
key, err := cfg.Section("").GetKey("key name")
```

Same as with sections, you can also get a key directly and skip the error handling:

```go
key := cfg.Section("").Key("key name")
```

To check if a key exists:

```go
yes := cfg.Section("").HasKey("key name")
```

To create a new key:

```go
_, err := cfg.Section("").NewKey("name", "value")
```

To get a list of all key objects or names under a section:

```go
keys := cfg.Section("").Keys()
names := cfg.Section("").KeyStrings()
```

To get a clone of all key-value pairs under a section:

```go
hash := cfg.Section("").KeysHash()
```
### Working with values

To get a value of type string:

```go
val := cfg.Section("").Key("key name").String()
```

To validate the value on the fly with a custom function:

```go
val := cfg.Section("").Key("key name").Validate(func(in string) string {
	if len(in) == 0 {
		return "default"
	}
	return in
})
```

If you don't need any automatic transformation of the value (e.g. recursive reading), you can get the raw value directly (this is the best performing way):

```go
val := cfg.Section("").Key("key name").Value()
```

To check if a raw value exists:

```go
yes := cfg.Section("").HasValue("test value")
```

To get values of other types:

```go
// Rules for boolean values:
// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
v, err = cfg.Section("").Key("INT64").Int64()
v, err = cfg.Section("").Key("UINT").Uint()
v, err = cfg.Section("").Key("UINT64").Uint64()
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
v, err = cfg.Section("").Key("TIME").Time() // RFC3339

v = cfg.Section("").Key("BOOL").MustBool()
v = cfg.Section("").Key("FLOAT64").MustFloat64()
v = cfg.Section("").Key("INT").MustInt()
v = cfg.Section("").Key("INT64").MustInt64()
v = cfg.Section("").Key("UINT").MustUint()
v = cfg.Section("").Key("UINT64").MustUint64()
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
v = cfg.Section("").Key("TIME").MustTime() // RFC3339

// Methods whose names start with Must accept one argument of the same type
// as a default value; when the key does not exist or the conversion fails,
// that default value is returned directly.
// However, MustString always requires a default value.

v = cfg.Section("").Key("String").MustString("default")
v = cfg.Section("").Key("BOOL").MustBool(true)
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
v = cfg.Section("").Key("INT").MustInt(10)
v = cfg.Section("").Key("INT64").MustInt64(99)
v = cfg.Section("").Key("UINT").MustUint(3)
v = cfg.Section("").Key("UINT64").MustUint64(6)
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
```

What if my value spans multiple lines?

```ini
[advance]
ADDRESS = """404 road,
NotFound, State, 5000
Earth"""
```

Not a problem!

```go
cfg.Section("advance").Key("ADDRESS").String()

/* --- start ---
404 road,
NotFound, State, 5000
Earth
------ end --- */
```

Awesome! And what if a value is too long to fit on one line and I want to continue it on the next?

```ini
[advance]
two_lines = how about \
	continuation lines?
lots_of_lines = 1 \
	2 \
	3 \
	4
```

Piece of cake!

```go
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```

But sometimes joining the two lines is not what I want; how do I stop the automatic concatenation?

```go
cfg, err := ini.LoadSources(ini.LoadOptions{
	IgnoreContinuation: true,
}, "filename")
```

Wow, that's great!

Note that single and double quotes around a value are automatically stripped:

```ini
foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```

Is that all? Ha, of course not.
#### Helper methods of working with values

To set candidate values while getting a key value:

```go
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
```

If the value read is not one of the candidates, the default value is returned instead; the default value does not need to be one of the candidates.

To validate that a value is within a given range:

```go
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
```

##### Auto-split a value into a slice

To use zero values in place of invalid inputs:

```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
vals = cfg.Section("").Key("STRINGS").Strings(",")
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
vals = cfg.Section("").Key("INTS").Ints(",")
vals = cfg.Section("").Key("INT64S").Int64s(",")
vals = cfg.Section("").Key("UINTS").Uints(",")
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
vals = cfg.Section("").Key("TIMES").Times(",")
```

To exclude invalid inputs from the result slice:

```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [2.2]
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
vals = cfg.Section("").Key("INTS").ValidInts(",")
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
vals = cfg.Section("").Key("UINTS").ValidUints(",")
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
```

To return an error directly when there is any invalid input:

```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> error
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
vals = cfg.Section("").Key("INTS").StrictInts(",")
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
vals = cfg.Section("").Key("UINTS").StrictUints(",")
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
```
### Saving your configuration

Finally it's time to save your configuration to somewhere.

A plain way to save your configuration is writing it to a file:

```go
// ...
err = cfg.SaveTo("my.ini")
err = cfg.SaveToIndent("my.ini", "\t")
```

A more advanced way is writing to anything that implements the `io.Writer` interface:

```go
// ...
cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```

By default, spaces are used to align the `=` sign between a key and its values for pretty output; the following code disables that feature:

```go
ini.PrettyFormat = false
```
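For example, a small sketch that renders the configuration into an in-memory buffer instead of a file:

```go
var buf bytes.Buffer
if _, err := cfg.WriteTo(&buf); err != nil {
	// handle error
}
fmt.Print(buf.String())
```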
## Advanced Usage

### Recursive values

While getting all values, the special syntax `%(<name>)s` is applied, where `<name>` can be the name of a key in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding key value; if the given key does not exist, an empty string is used instead. You can use up to 99 levels of recursive nesting.

```ini
NAME = ini

[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s

[package]
FULL_NAME = github.com/go-ini/%(NAME)s
```

```go
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```

### Parent-child sections

You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key does not exist in the child section, the library looks for it in the parent sections until there is no parent left.

```ini
NAME = ini
VERSION = v1
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s

[package]
CLONE_URL = https://%(IMPORT_PATH)s

[package.sub]
```

```go
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
```

#### Retrieving all key names from parent sections

```go
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```

### Unparseable sections

For special sections that do not contain the usual key-value pairs but rather free-form plain text, use `LoadOptions.UnparseableSections`:

```go
cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)

body := cfg.Section("COMMENTS").Body()

/* --- start ---
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
------ end --- */
```
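To build such a raw section programmatically, there is also `NewRawSection` (a sketch; the body string is illustrative):

```go
sec, err := cfg.NewRawSection("COMMENTS", "<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>\n")
```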
### Auto-increment key names

If a key name in the data source is `-`, it is treated as the special auto-increment key name syntax. The counter starts at 1 and is independent between sections.

```ini
[features]
-: Support read/write comments of keys and sections
-: Support auto-increment of key names
-: Support load multiple files to overwrite key values
```

```go
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
```

### Map to struct

Want a more object-oriented way to play with INI? Good idea.

```ini
Name = Unknwon
age = 21
Male = true
Born = 1993-01-01T20:17:05Z

[Note]
Content = Hi is a good man!
Cities = HangZhou, Boston
```

```go
type Note struct {
	Content string
	Cities  []string
}

type Person struct {
	Name string
	Age  int `ini:"age"`
	Male bool
	Born time.Time
	Note
	Created time.Time `ini:"-"`
}

func main() {
	cfg, err := ini.Load("path/to/ini")
	// ...
	p := new(Person)
	err = cfg.MapTo(p)
	// ...

	// Things can be simpler.
	err = ini.MapTo(p, "path/to/ini")
	// ...

	// Just map a section? Fine.
	n := new(Note)
	err = cfg.Section("Note").MapTo(n)
	// ...
}
```

How do I give a struct field a default value? It's simple: just assign it before mapping. If the key is not found or has the wrong type, the value will not be changed.

```go
// ...
p := &Person{
	Name: "Joe",
}
// ...
```

Playing with INI like this is really cool, but what's the point if you can't get my config file back from the struct?
### Reflect from struct

Did I say you can't?

```go
type Embeded struct {
	Dates  []time.Time `delim:"|"`
	Places []string    `ini:"places,omitempty"`
	None   []int       `ini:",omitempty"`
}

type Author struct {
	Name      string `ini:"NAME"`
	Male      bool
	Age       int
	GPA       float64
	NeverMind string `ini:"-"`
	*Embeded
}

func main() {
	a := &Author{"Unknwon", true, 21, 2.8, "",
		&Embeded{
			[]time.Time{time.Now(), time.Now()},
			[]string{"HangZhou", "Boston"},
			[]int{},
		}}
	cfg := ini.Empty()
	err := ini.ReflectFrom(cfg, a)
	// ...
}
```

Look, a miracle happened.

```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8

[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```

#### Name Mapper

To save your time and make your code cleaner, this library supports a name mapper of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) that handles the mapping between struct field names and the actual section and key names.

There are 2 built-in name mappers:

- `AllCapsUnderscore`: converts the field name to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
- `TitleUnderscore`: converts the field name to the `title_underscore` format before matching section and key names.

To use them:

```go
type Info struct {
	PackageName string
}

func main() {
	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
	// ...

	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
	// ...
	info := new(Info)
	cfg.NameMapper = ini.AllCapsUnderscore
	err = cfg.MapTo(info)
	// ...
}
```

The same rules apply when using the `ini.ReflectFromWithMapper` function.
#### Value Mapper

A value mapper expands the concrete content of a value automatically with a custom function, for example to read environment variables at runtime:

```go
type Env struct {
	Foo string `ini:"foo"`
}

func main() {
	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
	cfg.ValueMapper = os.ExpandEnv
	// ...
	env := &Env{}
	err = cfg.Section("env").MapTo(env)
}
```

In this example, `env.Foo` will be the value of the environment variable `MY_VAR` as obtained at runtime.
#### Other notes on map/reflect

Any embedded struct is treated as a separate section by default, and the map/reflect feature does not create parent-child section relationships automatically:

```go
type Child struct {
	Age string
}

type Parent struct {
	Name string
	Child
}

type Config struct {
	City string
	Parent
}
```

Example configuration file:

```ini
City = Boston

[Parent]
Name = Unknwon

[Child]
Age = 21
```

Great, but what if I insist, call me paranoid, that the embedded struct lives in the same section? Well, all roads lead to Rome.

```go
type Child struct {
	Age string
}

type Parent struct {
	Name string
	Child `ini:"Parent"`
}

type Config struct {
	City string
	Parent
}
```

Example configuration file:

```ini
City = Boston

[Parent]
Name = Unknwon
Age = 21
```

## Getting Help

- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File An Issue](https://github.com/go-ini/ini/issues/new)

## FAQs

### What does the `BlockMode` field do?

By default, the library uses a lock on read and write operations to keep your data safe. But if you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.

### Why write another INI parsing library?

Many people use my other library [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this library gives a **10-30%** performance boost.

Making those changes meant breaking the API, so it was safer to start over in a new repository, and this library uses `gopkg.in` directly for versioned releases. (The real truth is that the import path got shorter.)
32
vendor/github.com/go-ini/ini/error.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package ini

import (
	"fmt"
)

// ErrDelimiterNotFound is returned when parsing a line that contains no
// key-value delimiter.
type ErrDelimiterNotFound struct {
	Line string
}

// IsErrDelimiterNotFound reports whether err is of type ErrDelimiterNotFound.
func IsErrDelimiterNotFound(err error) bool {
	_, ok := err.(ErrDelimiterNotFound)
	return ok
}

func (err ErrDelimiterNotFound) Error() string {
	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}
561
vendor/github.com/go-ini/ini/ini.go
generated
vendored
Normal file
@ -0,0 +1,561 @@
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Package ini provides INI file read and write functionality in Go.
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name for default section. You can use this constant or the string literal.
|
||||
// In most cases, an empty string is all you need to access the section.
|
||||
DEFAULT_SECTION = "DEFAULT"
|
||||
|
||||
// Maximum allowed depth when recursively substituting variable names.
|
||||
_DEPTH_VALUES = 99
|
||||
_VERSION = "1.28.0"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
func Version() string {
|
||||
return _VERSION
|
||||
}
|
||||
|
||||
var (
|
||||
// Delimiter to determine or compose a new line.
|
||||
// This variable will be changed to "\r\n" automatically on Windows
|
||||
// at package init time.
|
||||
LineBreak = "\n"
|
||||
|
||||
// Variable regexp pattern: %(variable)s
|
||||
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
|
||||
|
||||
// Indicate whether to align "=" sign with spaces to produce pretty output
|
||||
// or reduce all possible spaces for compact format.
|
||||
PrettyFormat = true
|
||||
|
||||
// Explicitly write DEFAULT section header
|
||||
DefaultHeader = false
|
||||
|
||||
// Indicate whether to put a line between sections
|
||||
PrettySection = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
LineBreak = "\r\n"
|
||||
}
|
||||
}
|
||||
|
||||
func inSlice(str string, s []string) bool {
|
||||
for _, v := range s {
|
||||
if str == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// dataSource is an interface that returns an object which can be read and closed.
|
||||
type dataSource interface {
|
||||
ReadCloser() (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// sourceFile represents an object that contains content on the local file system.
|
||||
type sourceFile struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
|
||||
return os.Open(s.name)
|
||||
}
|
||||
|
||||
type bytesReadCloser struct {
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.reader.Read(p)
|
||||
}
|
||||
|
||||
func (rc *bytesReadCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourceData represents an object that contains content in memory.
|
||||
type sourceData struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
|
||||
}
|
||||
|
||||
// sourceReadCloser represents an input stream with Close method.
|
||||
type sourceReadCloser struct {
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
|
||||
return s.reader, nil
|
||||
}
|
||||
|
||||
// File represents a combination of one or more INI files in memory.
|
||||
type File struct {
|
||||
// Should make things safe, but sometimes doesn't matter.
|
||||
BlockMode bool
|
||||
// Make sure data is safe in multiple goroutines.
|
||||
lock sync.RWMutex
|
||||
|
||||
// Allow combination of multiple data sources.
|
||||
dataSources []dataSource
|
||||
// Actual data is stored here.
|
||||
sections map[string]*Section
|
||||
|
||||
// To keep data in order.
|
||||
sectionList []string
|
||||
|
||||
options LoadOptions
|
||||
|
||||
NameMapper
|
||||
ValueMapper
|
||||
}
|
||||
|
||||
// newFile initializes File object with given data sources.
|
||||
func newFile(dataSources []dataSource, opts LoadOptions) *File {
|
||||
return &File{
|
||||
BlockMode: true,
|
||||
dataSources: dataSources,
|
||||
sections: make(map[string]*Section),
|
||||
sectionList: make([]string, 0, 10),
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDataSource(source interface{}) (dataSource, error) {
|
||||
switch s := source.(type) {
|
||||
case string:
|
||||
return sourceFile{s}, nil
|
||||
case []byte:
|
||||
return &sourceData{s}, nil
|
||||
case io.ReadCloser:
|
||||
return &sourceReadCloser{s}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
|
||||
}
|
||||
}
|
||||
|
||||
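// LoadOptions contains customized options used for loading data source(s).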
type LoadOptions struct {
|
||||
// Loose indicates whether the parser should ignore nonexistent files or return error.
|
||||
Loose bool
|
||||
// Insensitive indicates whether the parser forces all section and key names to lowercase.
|
||||
Insensitive bool
|
||||
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
|
||||
IgnoreContinuation bool
|
||||
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||
IgnoreInlineComment bool
|
||||
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||
// This type of keys are mostly used in my.cnf.
|
||||
AllowBooleanKeys bool
|
||||
// AllowShadows indicates whether to keep track of keys with same name under same section.
|
||||
AllowShadows bool
|
||||
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
|
||||
// conform to key/value pairs. Specify the names of those blocks here.
|
||||
UnparseableSections []string
|
||||
}
|
||||
|
||||
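// LoadSources allows the caller to apply customized options for loading from data source(s).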
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
|
||||
sources := make([]dataSource, len(others)+1)
|
||||
sources[0], err = parseDataSource(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range others {
|
||||
sources[i+1], err = parseDataSource(others[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
f := newFile(sources, opts)
|
||||
if err = f.Reload(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Load loads and parses from INI data sources.
|
||||
// Arguments can be mixed of file name with string type, or raw data in []byte.
|
||||
// It will return error if list contains nonexistent files.
|
||||
func Load(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{}, source, others...)
|
||||
}
|
||||
|
||||
// LooseLoad has exactly same functionality as Load function
|
||||
// except it ignores nonexistent files instead of returning error.
|
||||
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Loose: true}, source, others...)
|
||||
}
|
||||
|
||||
// InsensitiveLoad has exactly same functionality as Load function
|
||||
// except it forces all section and key names to be lowercased.
|
||||
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
|
||||
}
|
||||
|
||||
// ShadowLoad has exactly same functionality as Load function
// except it allows having shadow keys.
|
||||
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
|
||||
}
|
||||
|
||||
// Empty returns an empty file object.
|
||||
func Empty() *File {
|
||||
// Ignore error here; we are sure our data is good.
|
||||
f, _ := Load([]byte(""))
|
||||
return f
|
||||
}
|
||||
|
||||
// NewSection creates a new section.
|
||||
func (f *File) NewSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new section: empty section name")
|
||||
} else if f.options.Insensitive && name != DEFAULT_SECTION {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, f.sectionList) {
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
f.sectionList = append(f.sectionList, name)
|
||||
f.sections[name] = newSection(f, name)
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
// NewRawSection creates a new section with an unparseable body.
|
||||
func (f *File) NewRawSection(name, body string) (*Section, error) {
|
||||
section, err := f.NewSection(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
section.isRawSection = true
|
||||
section.rawBody = body
|
||||
return section, nil
|
||||
}
|
||||
|
||||
// NewSections creates a list of sections.
|
||||
func (f *File) NewSections(names ...string) (err error) {
|
||||
for _, name := range names {
|
||||
if _, err = f.NewSection(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSection returns section by given name.
|
||||
func (f *File) GetSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
} else if f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sec := f.sections[name]
|
||||
if sec == nil {
|
||||
return nil, fmt.Errorf("section '%s' does not exist", name)
|
||||
}
|
||||
return sec, nil
|
||||
}
|
||||
|
||||
// Section assumes named section exists and returns a zero-value when not.
|
||||
func (f *File) Section(name string) *Section {
|
||||
sec, err := f.GetSection(name)
|
||||
if err != nil {
|
||||
// Note: It's OK here because the only possible error is empty section name,
|
||||
// but if it's empty, this piece of code won't be executed.
|
||||
sec, _ = f.NewSection(name)
|
||||
return sec
|
||||
}
|
||||
return sec
|
||||
}
|
||||
|
||||
// Sections returns a list of Section stored in the current instance.
|
||||
func (f *File) Sections() []*Section {
|
||||
sections := make([]*Section, len(f.sectionList))
|
||||
for i := range f.sectionList {
|
||||
sections[i] = f.Section(f.sectionList[i])
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of given section name.
|
||||
func (f *File) ChildSections(name string) []*Section {
|
||||
return f.Section(name).ChildSections()
|
||||
}
|
||||
|
||||
// SectionStrings returns list of section names.
|
||||
func (f *File) SectionStrings() []string {
|
||||
list := make([]string, len(f.sectionList))
|
||||
copy(list, f.sectionList)
|
||||
return list
|
||||
}
|
||||
|
||||
// DeleteSection deletes a section.
|
||||
func (f *File) DeleteSection(name string) {
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
}
|
||||
|
||||
for i, s := range f.sectionList {
|
||||
if s == name {
|
||||
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
|
||||
delete(f.sections, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) reload(s dataSource) error {
|
||||
r, err := s.ReadCloser()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return f.parse(r)
|
||||
}
|
||||
|
||||
// Reload reloads and parses all data sources.
|
||||
func (f *File) Reload() (err error) {
|
||||
for _, s := range f.dataSources {
|
||||
if err = f.reload(s); err != nil {
|
||||
// In loose mode, we create an empty default section for nonexistent files.
|
||||
if os.IsNotExist(err) && f.options.Loose {
|
||||
f.parse(bytes.NewBuffer(nil))
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append appends one or more data sources and reloads automatically.
|
||||
func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||
ds, err := parseDataSource(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
for _, s := range others {
|
||||
ds, err = parseDataSource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
}
|
||||
return f.Reload()
|
||||
}
|
||||
|
||||
// WriteToIndent writes content into io.Writer with given indention.
|
||||
// If PrettyFormat has been set to be true,
|
||||
// it will align "=" sign with spaces under each section.
|
||||
func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
|
||||
equalSign := "="
|
||||
if PrettyFormat {
|
||||
equalSign = " = "
|
||||
}
|
||||
|
||||
// Use buffer to make sure target is safe until finish encoding.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
for i, sname := range f.sectionList {
|
||||
sec := f.Section(sname)
|
||||
if len(sec.Comment) > 0 {
|
||||
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
|
||||
sec.Comment = "; " + sec.Comment
|
||||
}
|
||||
if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if i > 0 || DefaultHeader {
|
||||
if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
// Write nothing if default section is empty
|
||||
if len(sec.keyList) == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if sec.isRawSection {
|
||||
if _, err = buf.WriteString(sec.rawBody); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Count and generate alignment length and buffer spaces using the
|
||||
// longest key. Keys may be modified if they contain certain characters so
|
||||
// we need to take that into account in our calculation.
|
||||
alignLength := 0
|
||||
if PrettyFormat {
|
||||
for _, kname := range sec.keyList {
|
||||
keyLength := len(kname)
|
||||
// First case will surround key by ` and second by """
|
||||
if strings.ContainsAny(kname, "\"=:") {
|
||||
keyLength += 2
|
||||
} else if strings.Contains(kname, "`") {
|
||||
keyLength += 6
|
||||
}
|
||||
|
||||
if keyLength > alignLength {
|
||||
alignLength = keyLength
|
||||
}
|
||||
}
|
||||
}
|
||||
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
|
||||
|
||||
KEY_LIST:
|
||||
for _, kname := range sec.keyList {
|
||||
key := sec.Key(kname)
|
||||
if len(key.Comment) > 0 {
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
if key.Comment[0] != '#' && key.Comment[0] != ';' {
|
||||
key.Comment = "; " + key.Comment
|
||||
}
|
||||
if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
switch {
|
||||
case key.isAutoIncrement:
|
||||
kname = "-"
|
||||
case strings.ContainsAny(kname, "\"=:"):
|
||||
kname = "`" + kname + "`"
|
||||
case strings.Contains(kname, "`"):
|
||||
kname = `"""` + kname + `"""`
|
||||
}
|
||||
|
||||
for _, val := range key.ValueWithShadows() {
|
||||
if _, err = buf.WriteString(kname); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if key.isBooleanType {
|
||||
if kname != sec.keyList[len(sec.keyList)-1] {
|
||||
buf.WriteString(LineBreak)
|
||||
}
|
||||
continue KEY_LIST
|
||||
}
|
||||
|
||||
// Write out alignment spaces before "=" sign
|
||||
if PrettyFormat {
|
||||
buf.Write(alignSpaces[:alignLength-len(kname)])
|
||||
}
|
||||
|
||||
// In case key value contains "\n", "`", "\"", "#" or ";"
|
||||
if strings.ContainsAny(val, "\n`") {
|
||||
val = `"""` + val + `"""`
|
||||
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
|
||||
val = "`" + val + "`"
|
||||
}
|
||||
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err = buf.WriteString(LineBreak); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf.WriteTo(w)
|
||||
}
|
||||
|
||||
// WriteTo writes file content into io.Writer.
|
||||
func (f *File) WriteTo(w io.Writer) (int64, error) {
|
||||
return f.WriteToIndent(w, "")
|
||||
}
|
||||
|
||||
// SaveToIndent writes content to file system with given value indention.
|
||||
func (f *File) SaveToIndent(filename, indent string) error {
|
||||
// Note: Because we are truncating with os.Create,
// it's safer to save to a temporary file location and rename after done.
|
||||
tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
|
||||
defer os.Remove(tmpPath)
|
||||
|
||||
fw, err := os.Create(tmpPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = f.WriteToIndent(fw, indent); err != nil {
|
||||
fw.Close()
|
||||
return err
|
||||
}
|
||||
fw.Close()
|
||||
|
||||
// Remove old file and rename the new one.
|
||||
os.Remove(filename)
|
||||
return os.Rename(tmpPath, filename)
|
||||
}
|
||||
|
||||
// SaveTo writes content to file system.
|
||||
func (f *File) SaveTo(filename string) error {
|
||||
return f.SaveToIndent(filename, "")
|
||||
}
|
491
vendor/github.com/go-ini/ini/ini_test.go
generated
vendored
Normal file
@ -0,0 +1,491 @@
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func Test_Version(t *testing.T) {
|
||||
Convey("Get version", t, func() {
|
||||
So(Version(), ShouldEqual, _VERSION)
|
||||
})
|
||||
}
|
||||
|
||||
const _CONF_DATA = `
|
||||
; Package name
|
||||
NAME = ini
|
||||
; Package version
|
||||
VERSION = v1
|
||||
; Package import path
|
||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
||||
|
||||
# Information about package author
|
||||
# Bio can be written in multiple lines.
|
||||
[author]
|
||||
NAME = Unknwon ; Succeeding comment
|
||||
E-MAIL = fake@localhost
|
||||
GITHUB = https://github.com/%(NAME)s
|
||||
BIO = """Gopher.
|
||||
Coding addict.
|
||||
Good man.
|
||||
""" # Succeeding comment
|
||||
|
||||
[package]
|
||||
CLONE_URL = https://%(IMPORT_PATH)s
|
||||
|
||||
[package.sub]
|
||||
UNUSED_KEY = should be deleted
|
||||
|
||||
[features]
|
||||
-: Support read/write comments of keys and sections
|
||||
-: Support auto-increment of key names
|
||||
-: Support load multiple files to overwrite key values
|
||||
|
||||
[types]
|
||||
STRING = str
|
||||
BOOL = true
|
||||
BOOL_FALSE = false
|
||||
FLOAT64 = 1.25
|
||||
INT = 10
|
||||
TIME = 2015-01-01T20:17:05Z
|
||||
DURATION = 2h45m
|
||||
UINT = 3
|
||||
|
||||
[array]
|
||||
STRINGS = en, zh, de
|
||||
FLOAT64S = 1.1, 2.2, 3.3
|
||||
INTS = 1, 2, 3
|
||||
UINTS = 1, 2, 3
|
||||
TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z
|
||||
|
||||
[note]
|
||||
empty_lines = next line is empty\
|
||||
|
||||
; Comment before the section
|
||||
[comments] ; This is a comment for the section too
|
||||
; Comment before key
|
||||
key = "value"
|
||||
key2 = "value2" ; This is a comment for key2
|
||||
key3 = "one", "two", "three"
|
||||
|
||||
[advance]
|
||||
value with quotes = "some value"
|
||||
value quote2 again = 'some value'
|
||||
includes comment sign = ` + "`" + "my#password" + "`" + `
|
||||
includes comment sign2 = ` + "`" + "my;password" + "`" + `
|
||||
true = 2+3=5
|
||||
"1+1=2" = true
|
||||
"""6+1=7""" = true
|
||||
"""` + "`" + `5+5` + "`" + `""" = 10
|
||||
` + "`" + `"6+6"` + "`" + ` = 12
|
||||
` + "`" + `7-2=4` + "`" + ` = false
|
||||
ADDRESS = ` + "`" + `404 road,
|
||||
NotFound, State, 50000` + "`" + `
|
||||
|
||||
two_lines = how about \
|
||||
continuation lines?
|
||||
lots_of_lines = 1 \
|
||||
2 \
|
||||
3 \
|
||||
4 \
|
||||
`
|
||||
|
||||
func Test_Load(t *testing.T) {
|
||||
Convey("Load from data sources", t, func() {
|
||||
|
||||
Convey("Load with empty data", func() {
|
||||
So(Empty(), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with multiple data sources", func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini", ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA))))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
f, err := Load([]byte(_CONF_DATA), "testdata/404.ini")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(f, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with io.ReadCloser", func() {
|
||||
cfg, err := Load(ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA))))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(cfg.Section("").Key("NAME").String(), ShouldEqual, "ini")
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Bad load process", t, func() {
|
||||
|
||||
Convey("Load from invalid data sources", func() {
|
||||
_, err := Load(_CONF_DATA)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
f, err := Load("testdata/404.ini")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(f, ShouldBeNil)
|
||||
|
||||
_, err = Load(1)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte(""), 1)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with bad section name", func() {
|
||||
_, err := Load([]byte("[]"))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte("["))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with bad keys", func() {
|
||||
_, err := Load([]byte(`"""name`))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte(`"""name"""`))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte(`""=1`))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte(`=`))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = Load([]byte(`name`))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with bad values", func() {
|
||||
_, err := Load([]byte(`name="""Unknwon`))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Get section and key insensitively", t, func() {
|
||||
cfg, err := InsensitiveLoad([]byte(_CONF_DATA), "testdata/conf.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
sec, err := cfg.GetSection("Author")
|
||||
So(err, ShouldBeNil)
|
||||
So(sec, ShouldNotBeNil)
|
||||
|
||||
key, err := sec.GetKey("E-mail")
|
||||
So(err, ShouldBeNil)
|
||||
So(key, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Load with ignoring continuation lines", t, func() {
|
||||
cfg, err := LoadSources(LoadOptions{IgnoreContinuation: true}, []byte(`key1=a\b\
|
||||
key2=c\d\`))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(cfg.Section("").Key("key1").String(), ShouldEqual, `a\b\`)
|
||||
So(cfg.Section("").Key("key2").String(), ShouldEqual, `c\d\`)
|
||||
})
|
||||
|
||||
Convey("Load with ignoring inline comments", t, func() {
|
||||
cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, []byte(`key1=value ;comment
|
||||
key2=value #comment2`))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(cfg.Section("").Key("key1").String(), ShouldEqual, `value ;comment`)
|
||||
So(cfg.Section("").Key("key2").String(), ShouldEqual, `value #comment2`)
|
||||
|
||||
var buf bytes.Buffer
|
||||
cfg.WriteTo(&buf)
|
||||
So(buf.String(), ShouldEqual, `key1 = value ;comment
|
||||
key2 = value #comment2
|
||||
|
||||
`)
|
||||
})
|
||||
|
||||
Convey("Load with boolean type keys", t, func() {
|
||||
cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, []byte(`key1=hello
|
||||
key2
|
||||
#key3
|
||||
key4
|
||||
key5`))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(strings.Join(cfg.Section("").KeyStrings(), ","), ShouldEqual, "key1,key2,key4,key5")
|
||||
So(cfg.Section("").Key("key2").MustBool(false), ShouldBeTrue)
|
||||
|
||||
var buf bytes.Buffer
|
||||
cfg.WriteTo(&buf)
|
||||
// there is always a trailing \n at the end of the section
|
||||
So(buf.String(), ShouldEqual, `key1 = hello
|
||||
key2
|
||||
#key3
|
||||
key4
|
||||
key5
|
||||
`)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_File_ChildSections(t *testing.T) {
|
||||
Convey("Find child sections by parent name", t, func() {
|
||||
cfg, err := Load([]byte(`
|
||||
[node]
|
||||
|
||||
[node.biz1]
|
||||
|
||||
[node.biz2]
|
||||
|
||||
[node.biz3]
|
||||
|
||||
[node.bizN]
|
||||
`))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
children := cfg.ChildSections("node")
|
||||
names := make([]string, len(children))
|
||||
for i := range children {
|
||||
names[i] = children[i].name
|
||||
}
|
||||
So(strings.Join(names, ","), ShouldEqual, "node.biz1,node.biz2,node.biz3,node.bizN")
|
||||
})
|
||||
}
|
||||
|
||||
func Test_LooseLoad(t *testing.T) {
|
||||
Convey("Loose load from data sources", t, func() {
|
||||
Convey("Loose load mixed with nonexistent file", func() {
|
||||
cfg, err := LooseLoad("testdata/404.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
var fake struct {
|
||||
Name string `ini:"name"`
|
||||
}
|
||||
So(cfg.MapTo(&fake), ShouldBeNil)
|
||||
|
||||
cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon")
|
||||
So(cfg.MapTo(&fake), ShouldBeNil)
|
||||
So(fake.Name, ShouldEqual, "Unknwon")
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func Test_File_Append(t *testing.T) {
|
||||
Convey("Append data sources", t, func() {
|
||||
cfg, err := Load([]byte(""))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(cfg.Append([]byte(""), []byte("")), ShouldBeNil)
|
||||
|
||||
Convey("Append bad data sources", func() {
|
||||
So(cfg.Append(1), ShouldNotBeNil)
|
||||
So(cfg.Append([]byte(""), 1), ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func Test_File_WriteTo(t *testing.T) {
|
||||
Convey("Write to somewhere", t, func() {
|
||||
var buf bytes.Buffer
|
||||
cfg := Empty()
|
||||
cfg.WriteTo(&buf)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_File_SaveTo_WriteTo(t *testing.T) {
|
||||
Convey("Save file", t, func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
cfg.Section("").Key("NAME").Comment = "Package name"
|
||||
cfg.Section("author").Comment = `Information about package author
|
||||
# Bio can be written in multiple lines.`
|
||||
cfg.Section("advanced").Key("val w/ pound").SetValue("my#password")
|
||||
cfg.Section("advanced").Key("longest key has a colon : yes/no").SetValue("yes")
|
||||
So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil)
|
||||
|
||||
cfg.Section("author").Key("NAME").Comment = "This is author name"
|
||||
|
||||
So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil)
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = cfg.WriteToIndent(&buf, "\t")
|
||||
So(err, ShouldBeNil)
|
||||
So(buf.String(), ShouldEqual, `; Package name
|
||||
NAME = ini
|
||||
; Package version
|
||||
VERSION = v1
|
||||
; Package import path
|
||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
||||
|
||||
; Information about package author
|
||||
# Bio can be written in multiple lines.
|
||||
[author]
|
||||
; This is author name
|
||||
NAME = Unknwon
|
||||
E-MAIL = u@gogs.io
|
||||
GITHUB = https://github.com/%(NAME)s
|
||||
# Succeeding comment
|
||||
BIO = """Gopher.
|
||||
Coding addict.
|
||||
Good man.
|
||||
"""
|
||||
|
||||
[package]
|
||||
CLONE_URL = https://%(IMPORT_PATH)s
|
||||
|
||||
[package.sub]
|
||||
UNUSED_KEY = should be deleted
|
||||
|
||||
[features]
|
||||
- = Support read/write comments of keys and sections
|
||||
- = Support auto-increment of key names
|
||||
- = Support load multiple files to overwrite key values
|
||||
|
||||
[types]
|
||||
STRING = str
|
||||
BOOL = true
|
||||
BOOL_FALSE = false
|
||||
FLOAT64 = 1.25
|
||||
INT = 10
|
||||
TIME = 2015-01-01T20:17:05Z
|
||||
DURATION = 2h45m
|
||||
UINT = 3
|
||||
|
||||
[array]
|
||||
STRINGS = en, zh, de
|
||||
FLOAT64S = 1.1, 2.2, 3.3
|
||||
INTS = 1, 2, 3
|
||||
UINTS = 1, 2, 3
|
||||
TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z
|
||||
|
||||
[note]
|
||||
empty_lines = next line is empty
|
||||
|
||||
; Comment before the section
|
||||
; This is a comment for the section too
|
||||
[comments]
|
||||
; Comment before key
|
||||
key = value
|
||||
; This is a comment for key2
|
||||
key2 = value2
|
||||
key3 = "one", "two", "three"
|
||||
|
||||
[advance]
|
||||
value with quotes = some value
|
||||
value quote2 again = some value
|
||||
includes comment sign = `+"`"+"my#password"+"`"+`
|
||||
includes comment sign2 = `+"`"+"my;password"+"`"+`
|
||||
true = 2+3=5
|
||||
`+"`"+`1+1=2`+"`"+` = true
|
||||
`+"`"+`6+1=7`+"`"+` = true
|
||||
"""`+"`"+`5+5`+"`"+`""" = 10
|
||||
`+"`"+`"6+6"`+"`"+` = 12
|
||||
`+"`"+`7-2=4`+"`"+` = false
|
||||
ADDRESS = """404 road,
|
||||
NotFound, State, 50000"""
|
||||
two_lines = how about continuation lines?
|
||||
lots_of_lines = 1 2 3 4
|
||||
|
||||
[advanced]
|
||||
val w/ pound = `+"`"+`my#password`+"`"+`
|
||||
`+"`"+`longest key has a colon : yes/no`+"`"+` = yes
|
||||
|
||||
`)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_File_WriteTo_SectionRaw(t *testing.T) {
|
||||
Convey("Write a INI with a raw section", t, func() {
|
||||
var buf bytes.Buffer
|
||||
cfg, err := LoadSources(
|
||||
LoadOptions{
|
||||
UnparseableSections: []string{"CORE_LESSON", "COMMENTS"},
|
||||
},
|
||||
"testdata/aicc.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
cfg.WriteToIndent(&buf, "\t")
|
||||
So(buf.String(), ShouldEqual, `[Core]
|
||||
Lesson_Location = 87
|
||||
Lesson_Status = C
|
||||
Score = 3
|
||||
Time = 00:02:30
|
||||
|
||||
[CORE_LESSON]
|
||||
my lesson state data – 1111111111111111111000000000000000001110000
|
||||
111111111111111111100000000000111000000000 – end my lesson state data
|
||||
[COMMENTS]
|
||||
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
|
||||
`)
|
||||
})
|
||||
}
|
||||
|
||||
// Helpers for slice tests.
|
||||
func float64sEqual(values []float64, expected ...float64) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i], ShouldEqual, v)
|
||||
}
|
||||
}
|
||||
|
||||
func intsEqual(values []int, expected ...int) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i], ShouldEqual, v)
|
||||
}
|
||||
}
|
||||
|
||||
func int64sEqual(values []int64, expected ...int64) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i], ShouldEqual, v)
|
||||
}
|
||||
}
|
||||
|
||||
func uintsEqual(values []uint, expected ...uint) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i], ShouldEqual, v)
|
||||
}
|
||||
}
|
||||
|
||||
func uint64sEqual(values []uint64, expected ...uint64) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i], ShouldEqual, v)
|
||||
}
|
||||
}
|
||||
|
||||
func timesEqual(values []time.Time, expected ...time.Time) {
|
||||
So(values, ShouldHaveLength, len(expected))
|
||||
for i, v := range expected {
|
||||
So(values[i].String(), ShouldEqual, v.String())
|
||||
}
|
||||
}
|
699
vendor/github.com/go-ini/ini/key.go
generated
vendored
Normal file
@ -0,0 +1,699 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package ini

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// Key represents a key under a section.
type Key struct {
	s               *Section
	name            string
	value           string
	isAutoIncrement bool
	isBooleanType   bool

	isShadow bool
	shadows  []*Key

	Comment string
}

// newKey simply returns a key object with given values.
func newKey(s *Section, name, val string) *Key {
	return &Key{
		s:     s,
		name:  name,
		value: val,
	}
}

func (k *Key) addShadow(val string) error {
	if k.isShadow {
		return errors.New("cannot add shadow to another shadow key")
	} else if k.isAutoIncrement || k.isBooleanType {
		return errors.New("cannot add shadow to auto-increment or boolean key")
	}

	shadow := newKey(k.s, k.name, val)
	shadow.isShadow = true
	k.shadows = append(k.shadows, shadow)
	return nil
}

// AddShadow adds a new shadow key to itself.
func (k *Key) AddShadow(val string) error {
	if !k.s.f.options.AllowShadows {
		return errors.New("shadow key is not allowed")
	}
	return k.addShadow(val)
}

// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string

// Name returns name of key.
func (k *Key) Name() string {
	return k.name
}

// Value returns raw value of key for performance purpose.
func (k *Key) Value() string {
	return k.value
}

// ValueWithShadows returns raw values of key and its shadows if any.
func (k *Key) ValueWithShadows() []string {
	if len(k.shadows) == 0 {
		return []string{k.value}
	}
	vals := make([]string, len(k.shadows)+1)
	vals[0] = k.value
	for i := range k.shadows {
		vals[i+1] = k.shadows[i].value
	}
	return vals
}

// transformValue takes a raw value and transforms to its final string.
func (k *Key) transformValue(val string) string {
	if k.s.f.ValueMapper != nil {
		val = k.s.f.ValueMapper(val)
	}

	// Fail-fast if no indicator char found for recursive value
	if !strings.Contains(val, "%") {
		return val
	}
	for i := 0; i < _DEPTH_VALUES; i++ {
		vr := varPattern.FindString(val)
		if len(vr) == 0 {
			break
		}

		// Take off leading '%(' and trailing ')s'.
		noption := strings.TrimLeft(vr, "%(")
		noption = strings.TrimRight(noption, ")s")

		// Search in the same section.
		nk, err := k.s.GetKey(noption)
		if err != nil {
			// Search again in default section.
			nk, _ = k.s.f.Section("").GetKey(noption)
		}

		// Substitute by new value.
		val = strings.Replace(val, vr, nk.value, -1)
	}
	return val
}

// String returns string representation of value.
func (k *Key) String() string {
	return k.transformValue(k.value)
}

// Validate accepts a validate function which can
// return modified result as key value.
func (k *Key) Validate(fn func(string) string) string {
	return fn(k.String())
}

// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
	switch str {
	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
		return true, nil
	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
		return false, nil
	}
	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}

// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
	return parseBool(k.String())
}

// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
	return strconv.ParseFloat(k.String(), 64)
}

// Int returns int type value.
func (k *Key) Int() (int, error) {
	return strconv.Atoi(k.String())
}

// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
	return strconv.ParseInt(k.String(), 10, 64)
}

// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
	u, e := strconv.ParseUint(k.String(), 10, 64)
	return uint(u), e
}

// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
	return strconv.ParseUint(k.String(), 10, 64)
}

// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
	return time.ParseDuration(k.String())
}

// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
	return time.Parse(format, k.String())
}

// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
	return k.TimeFormat(time.RFC3339)
}

// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
	val := k.String()
	if len(val) == 0 {
		k.value = defaultVal
		return defaultVal
	}
	return val
}

// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
	val, err := k.Bool()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatBool(defaultVal[0])
		return defaultVal[0]
	}
	return val
}

// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
	val, err := k.Float64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
		return defaultVal[0]
	}
	return val
}

// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
	val, err := k.Int()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
		return defaultVal[0]
	}
	return val
}

// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
	val, err := k.Int64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatInt(defaultVal[0], 10)
		return defaultVal[0]
	}
	return val
}

// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
	val, err := k.Uint()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
		return defaultVal[0]
	}
	return val
}

// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
	val, err := k.Uint64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatUint(defaultVal[0], 10)
		return defaultVal[0]
	}
	return val
}

// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
	val, err := k.Duration()
	if len(defaultVal) > 0 && err != nil {
		k.value = defaultVal[0].String()
		return defaultVal[0]
	}
	return val
}

// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
	val, err := k.TimeFormat(format)
	if len(defaultVal) > 0 && err != nil {
		k.value = defaultVal[0].Format(format)
		return defaultVal[0]
	}
	return val
}

// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
	return k.MustTimeFormat(time.RFC3339, defaultVal...)
}

// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
	val := k.String()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
	val := k.MustFloat64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
	val := k.MustInt()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
	val := k.MustInt64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
	val := k.MustUint()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
	val := k.MustUint64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
	val := k.MustTimeFormat(format)
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}

// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
	val := k.MustFloat64()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
	val := k.MustInt()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
	val := k.MustInt64()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
	val := k.MustTimeFormat(format)
	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
		return defaultVal
	}
	return val
}

// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}

// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
	str := k.String()
	if len(str) == 0 {
		return []string{}
	}

	vals := strings.Split(str, delim)
	for i := range vals {
		// vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
		vals[i] = strings.TrimSpace(vals[i])
	}
	return vals
}

// StringsWithShadows returns list of string divided by given delimiter.
// Shadows will also be appended if any.
func (k *Key) StringsWithShadows(delim string) []string {
	vals := k.ValueWithShadows()
	results := make([]string, 0, len(vals)*2)
	for i := range vals {
		if len(vals) == 0 {
			continue
		}

		results = append(results, strings.Split(vals[i], delim)...)
	}

	for i := range results {
		results[i] = k.transformValue(strings.TrimSpace(results[i]))
	}
	return results
}

// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
	vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
	return vals
}

// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
	vals, _ := k.parseInts(k.Strings(delim), true, false)
	return vals
}

// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
	return vals
}

// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
	vals, _ := k.parseUints(k.Strings(delim), true, false)
	return vals
}

// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
	vals, _ := k.parseUint64s(k.Strings(delim), true, false)
	return vals
}

// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
	vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
	return vals
}

// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
	return k.TimesFormat(time.RFC3339, delim)
}

// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
	vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
	return vals
}

// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
	vals, _ := k.parseInts(k.Strings(delim), false, false)
	return vals
}

// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
	return vals
}

// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
	vals, _ := k.parseUints(k.Strings(delim), false, false)
	return vals
}

// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
	return vals
}

// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
	return vals
}

// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
	return k.ValidTimesFormat(time.RFC3339, delim)
}

// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
	return k.parseFloat64s(k.Strings(delim), false, true)
}

// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
	return k.parseInts(k.Strings(delim), false, true)
}

// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
	return k.parseInt64s(k.Strings(delim), false, true)
}

// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
	return k.parseUints(k.Strings(delim), false, true)
}

// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
	return k.parseUint64s(k.Strings(delim), false, true)
}

// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
	return k.parseTimesFormat(format, k.Strings(delim), false, true)
}

// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
	return k.StrictTimesFormat(time.RFC3339, delim)
}

// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
	vals := make([]float64, 0, len(strs))
	for _, str := range strs {
		val, err := strconv.ParseFloat(str, 64)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
	vals := make([]int, 0, len(strs))
	for _, str := range strs {
		val, err := strconv.Atoi(str)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
	vals := make([]int64, 0, len(strs))
	for _, str := range strs {
		val, err := strconv.ParseInt(str, 10, 64)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
	vals := make([]uint, 0, len(strs))
	for _, str := range strs {
		val, err := strconv.ParseUint(str, 10, 0)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, uint(val))
		}
	}
	return vals, nil
}

// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
	vals := make([]uint64, 0, len(strs))
	for _, str := range strs {
		val, err := strconv.ParseUint(str, 10, 64)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
	vals := make([]time.Time, 0, len(strs))
	for _, str := range strs {
		val, err := time.Parse(format, str)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

// SetValue changes key value.
func (k *Key) SetValue(v string) {
	if k.s.f.BlockMode {
		k.s.f.lock.Lock()
		defer k.s.f.lock.Unlock()
	}

	k.value = v
	k.s.keysHash[k.name] = v
}
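All of the typed accessors above funnel through Key.String(). As a rough usage sketch (illustrative only, not part of this commit; it assumes the package is imported under its canonical name, e.g. gopkg.in/ini.v1, rather than this vendored path):

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1" // assumed import path for go-ini/ini
)

func main() {
	cfg, err := ini.Load([]byte("port = 8080\nhosts = a,b,c\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")
	fmt.Println(sec.Key("port").MustInt(80))   // 8080; would fall back to 80 on a parse error
	fmt.Println(sec.Key("hosts").Strings(",")) // [a b c]
	// Missing key: In() matches no candidate, so the default is returned.
	fmt.Println(sec.Key("mode").In("dev", []string{"dev", "prod"})) // dev
}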
573
vendor/github.com/go-ini/ini/key_test.go
generated
vendored
Normal file
@ -0,0 +1,573 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package ini

import (
	"bytes"
	"fmt"
	"strings"
	"testing"
	"time"

	. "github.com/smartystreets/goconvey/convey"
)

func Test_Key(t *testing.T) {
	Convey("Test getting and setting values", t, func() {
		cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini")
		So(err, ShouldBeNil)
		So(cfg, ShouldNotBeNil)

		Convey("Get values in default section", func() {
			sec := cfg.Section("")
			So(sec, ShouldNotBeNil)
			So(sec.Key("NAME").Value(), ShouldEqual, "ini")
			So(sec.Key("NAME").String(), ShouldEqual, "ini")
			So(sec.Key("NAME").Validate(func(in string) string {
				return in
			}), ShouldEqual, "ini")
			So(sec.Key("NAME").Comment, ShouldEqual, "; Package name")
			So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1")
		})

		Convey("Get values in non-default section", func() {
			sec := cfg.Section("author")
			So(sec, ShouldNotBeNil)
			So(sec.Key("NAME").String(), ShouldEqual, "Unknwon")
			So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon")

			sec = cfg.Section("package")
			So(sec, ShouldNotBeNil)
			So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
		})

		Convey("Get auto-increment key names", func() {
			keys := cfg.Section("features").Keys()
			for i, k := range keys {
				So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1))
			}
		})

		Convey("Get parent-keys that are available to the child section", func() {
			parentKeys := cfg.Section("package.sub").ParentKeys()
			for _, k := range parentKeys {
				So(k.Name(), ShouldEqual, "CLONE_URL")
			}
		})

		Convey("Get overwrite value", func() {
			So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io")
		})

		Convey("Get sections", func() {
			sections := cfg.Sections()
			for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} {
				So(sections[i].Name(), ShouldEqual, name)
			}
		})

		Convey("Get parent section value", func() {
			So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
			So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
		})

		Convey("Get multiple line value", func() {
			So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n")
		})

		Convey("Get values with type", func() {
			sec := cfg.Section("types")
			v1, err := sec.Key("BOOL").Bool()
			So(err, ShouldBeNil)
			So(v1, ShouldBeTrue)

			v1, err = sec.Key("BOOL_FALSE").Bool()
			So(err, ShouldBeNil)
			So(v1, ShouldBeFalse)

			v2, err := sec.Key("FLOAT64").Float64()
			So(err, ShouldBeNil)
			So(v2, ShouldEqual, 1.25)

			v3, err := sec.Key("INT").Int()
			So(err, ShouldBeNil)
			So(v3, ShouldEqual, 10)

			v4, err := sec.Key("INT").Int64()
			So(err, ShouldBeNil)
			So(v4, ShouldEqual, 10)

			v5, err := sec.Key("UINT").Uint()
			So(err, ShouldBeNil)
			So(v5, ShouldEqual, 3)

			v6, err := sec.Key("UINT").Uint64()
			So(err, ShouldBeNil)
			So(v6, ShouldEqual, 3)

			t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
			So(err, ShouldBeNil)
			v7, err := sec.Key("TIME").Time()
			So(err, ShouldBeNil)
			So(v7.String(), ShouldEqual, t.String())

			Convey("Must get values with type", func() {
				So(sec.Key("STRING").MustString("404"), ShouldEqual, "str")
				So(sec.Key("BOOL").MustBool(), ShouldBeTrue)
				So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25)
				So(sec.Key("INT").MustInt(), ShouldEqual, 10)
				So(sec.Key("INT").MustInt64(), ShouldEqual, 10)
				So(sec.Key("UINT").MustUint(), ShouldEqual, 3)
				So(sec.Key("UINT").MustUint64(), ShouldEqual, 3)
				So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String())

				dur, err := time.ParseDuration("2h45m")
				So(err, ShouldBeNil)
				So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds())

				Convey("Must get values with default value", func() {
					So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404")
					So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue)
					So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5)
					So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15)
					So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15)
					So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6)
					So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6)

					t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z")
					So(err, ShouldBeNil)
					So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String())

					So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds())

					Convey("Must should set default as key value", func() {
						So(sec.Key("STRING_404").String(), ShouldEqual, "404")
						So(sec.Key("BOOL_404").String(), ShouldEqual, "true")
						So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5")
						So(sec.Key("INT_404").String(), ShouldEqual, "15")
						So(sec.Key("INT64_404").String(), ShouldEqual, "15")
						So(sec.Key("UINT_404").String(), ShouldEqual, "6")
						So(sec.Key("UINT64_404").String(), ShouldEqual, "6")
						So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z")
						So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s")
					})
				})
			})
		})

		Convey("Get value with candidates", func() {
			sec := cfg.Section("types")
			So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str")
			So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25)
			So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10)
			So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10)
			So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3)
			So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3)

			zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z")
			So(err, ShouldBeNil)
			t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
			So(err, ShouldBeNil)
			So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String())

			Convey("Get value with candidates and default value", func() {
				So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str")
				So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25)
				So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10)
				So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10)
				So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3)
				So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3)
				So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String())
			})
		})

		Convey("Get values in range", func() {
			sec := cfg.Section("types")
			So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25)
			So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10)
			So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10)

			minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z")
			So(err, ShouldBeNil)
			midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z")
			So(err, ShouldBeNil)
			maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z")
			So(err, ShouldBeNil)
			t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
			So(err, ShouldBeNil)
			So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String())

			Convey("Get value in range with default value", func() {
				So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5)
				So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7)
				So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7)
				So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String())
			})
		})

		Convey("Get values into slice", func() {
			sec := cfg.Section("array")
			So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de")
			So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0)

			vals1 := sec.Key("FLOAT64S").Float64s(",")
			float64sEqual(vals1, 1.1, 2.2, 3.3)

			vals2 := sec.Key("INTS").Ints(",")
			intsEqual(vals2, 1, 2, 3)

			vals3 := sec.Key("INTS").Int64s(",")
			int64sEqual(vals3, 1, 2, 3)

			vals4 := sec.Key("UINTS").Uints(",")
			uintsEqual(vals4, 1, 2, 3)

			vals5 := sec.Key("UINTS").Uint64s(",")
			uint64sEqual(vals5, 1, 2, 3)

			t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
			So(err, ShouldBeNil)
			vals6 := sec.Key("TIMES").Times(",")
			timesEqual(vals6, t, t, t)
		})

		Convey("Get valid values into slice", func() {
			sec := cfg.Section("array")
			vals1 := sec.Key("FLOAT64S").ValidFloat64s(",")
			float64sEqual(vals1, 1.1, 2.2, 3.3)

			vals2 := sec.Key("INTS").ValidInts(",")
			intsEqual(vals2, 1, 2, 3)

			vals3 := sec.Key("INTS").ValidInt64s(",")
			int64sEqual(vals3, 1, 2, 3)

			vals4 := sec.Key("UINTS").ValidUints(",")
			uintsEqual(vals4, 1, 2, 3)

			vals5 := sec.Key("UINTS").ValidUint64s(",")
			uint64sEqual(vals5, 1, 2, 3)

			t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
			So(err, ShouldBeNil)
			vals6 := sec.Key("TIMES").ValidTimes(",")
			timesEqual(vals6, t, t, t)
		})
Convey("Get values one type into slice of another type", func() {
|
||||
sec := cfg.Section("array")
|
||||
vals1 := sec.Key("STRINGS").ValidFloat64s(",")
|
||||
So(vals1, ShouldBeEmpty)
|
||||
|
||||
vals2 := sec.Key("STRINGS").ValidInts(",")
|
||||
So(vals2, ShouldBeEmpty)
|
||||
|
||||
vals3 := sec.Key("STRINGS").ValidInt64s(",")
|
||||
So(vals3, ShouldBeEmpty)
|
||||
|
||||
vals4 := sec.Key("STRINGS").ValidUints(",")
|
||||
So(vals4, ShouldBeEmpty)
|
||||
|
||||
vals5 := sec.Key("STRINGS").ValidUint64s(",")
|
||||
So(vals5, ShouldBeEmpty)
|
||||
|
||||
vals6 := sec.Key("STRINGS").ValidTimes(",")
|
||||
So(vals6, ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("Get valid values into slice without errors", func() {
|
||||
sec := cfg.Section("array")
|
||||
vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",")
|
||||
So(err, ShouldBeNil)
|
||||
float64sEqual(vals1, 1.1, 2.2, 3.3)
|
||||
|
||||
vals2, err := sec.Key("INTS").StrictInts(",")
|
||||
So(err, ShouldBeNil)
|
||||
intsEqual(vals2, 1, 2, 3)
|
||||
|
||||
vals3, err := sec.Key("INTS").StrictInt64s(",")
|
||||
So(err, ShouldBeNil)
|
||||
int64sEqual(vals3, 1, 2, 3)
|
||||
|
||||
vals4, err := sec.Key("UINTS").StrictUints(",")
|
||||
So(err, ShouldBeNil)
|
||||
uintsEqual(vals4, 1, 2, 3)
|
||||
|
||||
vals5, err := sec.Key("UINTS").StrictUint64s(",")
|
||||
So(err, ShouldBeNil)
|
||||
uint64sEqual(vals5, 1, 2, 3)
|
||||
|
||||
t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
|
||||
So(err, ShouldBeNil)
|
||||
vals6, err := sec.Key("TIMES").StrictTimes(",")
|
||||
So(err, ShouldBeNil)
|
||||
timesEqual(vals6, t, t, t)
|
||||
})
|
||||
|
||||
Convey("Get invalid values into slice", func() {
|
||||
sec := cfg.Section("array")
|
||||
vals1, err := sec.Key("STRINGS").StrictFloat64s(",")
|
||||
So(vals1, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
vals2, err := sec.Key("STRINGS").StrictInts(",")
|
||||
So(vals2, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
vals3, err := sec.Key("STRINGS").StrictInt64s(",")
|
||||
So(vals3, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
vals4, err := sec.Key("STRINGS").StrictUints(",")
|
||||
So(vals4, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
vals5, err := sec.Key("STRINGS").StrictUint64s(",")
|
||||
So(vals5, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
vals6, err := sec.Key("STRINGS").StrictTimes(",")
|
||||
So(vals6, ShouldBeEmpty)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Get key hash", func() {
|
||||
cfg.Section("").KeysHash()
|
||||
})
|
||||
|
||||
Convey("Set key value", func() {
|
||||
k := cfg.Section("author").Key("NAME")
|
||||
k.SetValue("无闻")
|
||||
So(k.String(), ShouldEqual, "无闻")
|
||||
})
|
||||
|
||||
Convey("Get key strings", func() {
|
||||
So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT")
|
||||
})
|
||||
|
||||
Convey("Delete a key", func() {
|
||||
cfg.Section("package.sub").DeleteKey("UNUSED_KEY")
|
||||
_, err := cfg.Section("package.sub").GetKey("UNUSED_KEY")
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Has Key (backwards compatible)", func() {
|
||||
sec := cfg.Section("package.sub")
|
||||
haskey1 := sec.Haskey("UNUSED_KEY")
|
||||
haskey2 := sec.Haskey("CLONE_URL")
|
||||
haskey3 := sec.Haskey("CLONE_URL_NO")
|
||||
So(haskey1, ShouldBeTrue)
|
||||
So(haskey2, ShouldBeTrue)
|
||||
So(haskey3, ShouldBeFalse)
|
||||
})
|
||||
|
||||
Convey("Has Key", func() {
|
||||
sec := cfg.Section("package.sub")
|
||||
haskey1 := sec.HasKey("UNUSED_KEY")
|
||||
haskey2 := sec.HasKey("CLONE_URL")
|
||||
haskey3 := sec.HasKey("CLONE_URL_NO")
|
||||
So(haskey1, ShouldBeTrue)
|
||||
So(haskey2, ShouldBeTrue)
|
||||
So(haskey3, ShouldBeFalse)
|
||||
})
|
||||
|
||||
Convey("Has Value", func() {
|
||||
sec := cfg.Section("author")
|
||||
hasvalue1 := sec.HasValue("Unknwon")
|
||||
hasvalue2 := sec.HasValue("doc")
|
||||
So(hasvalue1, ShouldBeTrue)
|
||||
So(hasvalue2, ShouldBeFalse)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Test getting and setting bad values", t, func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
Convey("Create new key with empty name", func() {
|
||||
k, err := cfg.Section("").NewKey("", "")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(k, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("Create new section with empty name", func() {
|
||||
s, err := cfg.NewSection("")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(s, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("Create new sections with empty name", func() {
|
||||
So(cfg.NewSections(""), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Get section that not exists", func() {
|
||||
s, err := cfg.GetSection("404")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(s, ShouldBeNil)
|
||||
|
||||
s = cfg.Section("404")
|
||||
So(s, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Test key hash clone", t, func() {
|
||||
cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1)))
|
||||
So(err, ShouldBeNil)
|
||||
for _, v := range cfg.Section("").KeysHash() {
|
||||
So(len(v), ShouldBeGreaterThan, 0)
|
||||
}
|
||||
})
|
||||
|
||||
Convey("Key has empty value", t, func() {
|
||||
_conf := `key1=
|
||||
key2= ; comment`
|
||||
cfg, err := Load([]byte(_conf))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty)
|
||||
})
|
||||
}
|
||||
|
||||
const _CONF_GIT_CONFIG = `
|
||||
[remote "origin"]
|
||||
url = https://github.com/Antergone/test1.git
|
||||
url = https://github.com/Antergone/test2.git
|
||||
`
|
||||
|
||||
func Test_Key_Shadows(t *testing.T) {
|
||||
Convey("Shadows keys", t, func() {
|
||||
Convey("Disable shadows", func() {
|
||||
cfg, err := Load([]byte(_CONF_GIT_CONFIG))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git")
|
||||
})
|
||||
|
||||
Convey("Enable shadows", func() {
|
||||
cfg, err := ShadowLoad([]byte(_CONF_GIT_CONFIG))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git")
|
||||
So(strings.Join(cfg.Section(`remote "origin"`).Key("url").ValueWithShadows(), " "), ShouldEqual,
|
||||
"https://github.com/Antergone/test1.git https://github.com/Antergone/test2.git")
|
||||
|
||||
Convey("Save with shadows", func() {
|
||||
var buf bytes.Buffer
|
||||
_, err := cfg.WriteTo(&buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(buf.String(), ShouldEqual, `[remote "origin"]
|
||||
url = https://github.com/Antergone/test1.git
|
||||
url = https://github.com/Antergone/test2.git
|
||||
|
||||
`)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func newTestFile(block bool) *File {
|
||||
c, _ := Load([]byte(_CONF_DATA))
|
||||
c.BlockMode = block
|
||||
return c
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Section("").Key("NAME").Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value_NonBlock(b *testing.B) {
|
||||
c := newTestFile(false)
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Section("").Key("NAME").Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value_ViaSection(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
sec := c.Section("")
|
||||
for i := 0; i < b.N; i++ {
|
||||
sec.Key("NAME").Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) {
|
||||
c := newTestFile(false)
|
||||
sec := c.Section("")
|
||||
for i := 0; i < b.N; i++ {
|
||||
sec.Key("NAME").Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value_Direct(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
key := c.Section("").Key("NAME")
|
||||
for i := 0; i < b.N; i++ {
|
||||
key.Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) {
|
||||
c := newTestFile(false)
|
||||
key := c.Section("").Key("NAME")
|
||||
for i := 0; i < b.N; i++ {
|
||||
key.Value()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_String(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = c.Section("").Key("NAME").String()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_String_NonBlock(b *testing.B) {
|
||||
c := newTestFile(false)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = c.Section("").Key("NAME").String()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_String_ViaSection(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
sec := c.Section("")
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = sec.Key("NAME").String()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) {
|
||||
c := newTestFile(false)
|
||||
sec := c.Section("")
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = sec.Key("NAME").String()
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_SetValue(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Section("").Key("NAME").SetValue("10")
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark_Key_SetValue_VisSection(b *testing.B) {
|
||||
c := newTestFile(true)
|
||||
sec := c.Section("")
|
||||
for i := 0; i < b.N; i++ {
|
||||
sec.Key("NAME").SetValue("10")
|
||||
}
|
||||
}
|
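The paired Block/NonBlock benchmarks above differ only in File.BlockMode, which toggles the internal lock around key reads and writes. A minimal sketch of the trade-off (illustrative only, same assumed import path as the earlier example):

cfg, _ := ini.Load([]byte("NAME = ini"))
cfg.BlockMode = false // assumption: no concurrent writers, so skip locking for faster reads
_ = cfg.Section("").Key("NAME").String()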
361
vendor/github.com/go-ini/ini/parser.go
generated
vendored
Normal file
@ -0,0 +1,361 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package ini

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"
	"unicode"
)

type tokenType int

const (
	_TOKEN_INVALID tokenType = iota
	_TOKEN_COMMENT
	_TOKEN_SECTION
	_TOKEN_KEY
)

type parser struct {
	buf     *bufio.Reader
	isEOF   bool
	count   int
	comment *bytes.Buffer
}

func newParser(r io.Reader) *parser {
	return &parser{
		buf:     bufio.NewReader(r),
		count:   1,
		comment: &bytes.Buffer{},
	}
}

// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
	mask, err := p.buf.Peek(2)
	if err != nil && err != io.EOF {
		return err
	} else if len(mask) < 2 {
		return nil
	}

	switch {
	case mask[0] == 254 && mask[1] == 255:
		fallthrough
	case mask[0] == 255 && mask[1] == 254:
		p.buf.Read(mask)
	case mask[0] == 239 && mask[1] == 187:
		mask, err := p.buf.Peek(3)
		if err != nil && err != io.EOF {
			return err
		} else if len(mask) < 3 {
			return nil
		}
		if mask[2] == 191 {
			p.buf.Read(mask)
		}
	}
	return nil
}

func (p *parser) readUntil(delim byte) ([]byte, error) {
	data, err := p.buf.ReadBytes(delim)
	if err != nil {
		if err == io.EOF {
			p.isEOF = true
		} else {
			return nil, err
		}
	}
	return data, nil
}

func cleanComment(in []byte) ([]byte, bool) {
	i := bytes.IndexAny(in, "#;")
	if i == -1 {
		return nil, false
	}
	return in[i:], true
}

func readKeyName(in []byte) (string, int, error) {
	line := string(in)

	// Check if key name surrounded by quotes.
	var keyQuote string
	if line[0] == '"' {
		if len(line) > 6 && string(line[0:3]) == `"""` {
			keyQuote = `"""`
		} else {
			keyQuote = `"`
		}
	} else if line[0] == '`' {
		keyQuote = "`"
	}

	// Get out key name
	endIdx := -1
	if len(keyQuote) > 0 {
		startIdx := len(keyQuote)
		// FIXME: fail case -> """"""name"""=value
		pos := strings.Index(line[startIdx:], keyQuote)
		if pos == -1 {
			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
		}
		pos += startIdx

		// Find key-value delimiter
		i := strings.IndexAny(line[pos+startIdx:], "=:")
		if i < 0 {
			return "", -1, ErrDelimiterNotFound{line}
		}
		endIdx = pos + i
		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
	}

	endIdx = strings.IndexAny(line, "=:")
	if endIdx < 0 {
		return "", -1, ErrDelimiterNotFound{line}
	}
	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}

func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
	for {
		data, err := p.readUntil('\n')
		if err != nil {
			return "", err
		}
		next := string(data)

		pos := strings.LastIndex(next, valQuote)
		if pos > -1 {
			val += next[:pos]

			comment, has := cleanComment([]byte(next[pos:]))
			if has {
				p.comment.Write(bytes.TrimSpace(comment))
			}
			break
		}
		val += next
		if p.isEOF {
			return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
		}
	}
	return val, nil
}

func (p *parser) readContinuationLines(val string) (string, error) {
	for {
		data, err := p.readUntil('\n')
		if err != nil {
			return "", err
		}
		next := strings.TrimSpace(string(data))

		if len(next) == 0 {
			break
		}
		val += next
		if val[len(val)-1] != '\\' {
			break
		}
		val = val[:len(val)-1]
	}
	return val, nil
}

// hasSurroundedQuote checks if and only if the first and last characters
// are quotes \" or \'.
// It returns false if any other parts also contain same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
	return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
		strings.IndexByte(in[1:], quote) == len(in)-2
}

func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
	line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
	if len(line) == 0 {
		return "", nil
	}

	var valQuote string
	if len(line) > 3 && string(line[0:3]) == `"""` {
		valQuote = `"""`
	} else if line[0] == '`' {
		valQuote = "`"
	}

	if len(valQuote) > 0 {
		startIdx := len(valQuote)
		pos := strings.LastIndex(line[startIdx:], valQuote)
		// Check for multi-line value
		if pos == -1 {
			return p.readMultilines(line, line[startIdx:], valQuote)
		}

		return line[startIdx : pos+startIdx], nil
	}

	// Won't be able to reach here if value only contains whitespace
	line = strings.TrimSpace(line)

	// Check continuation lines when desired
	if !ignoreContinuation && line[len(line)-1] == '\\' {
		return p.readContinuationLines(line[:len(line)-1])
	}

	// Check if ignore inline comment
	if !ignoreInlineComment {
		i := strings.IndexAny(line, "#;")
		if i > -1 {
			p.comment.WriteString(line[i:])
			line = strings.TrimSpace(line[:i])
		}
	}

	// Trim single quotes
	if hasSurroundedQuote(line, '\'') ||
		hasSurroundedQuote(line, '"') {
		line = line[1 : len(line)-1]
	}
	return line, nil
}

// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
	p := newParser(reader)
	if err = p.BOM(); err != nil {
		return fmt.Errorf("BOM: %v", err)
	}

	// Ignore error because default section name is never empty string.
	section, _ := f.NewSection(DEFAULT_SECTION)

	var line []byte
	var inUnparseableSection bool
	for !p.isEOF {
		line, err = p.readUntil('\n')
		if err != nil {
			return err
		}

		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
		if len(line) == 0 {
			continue
		}

		// Comments
		if line[0] == '#' || line[0] == ';' {
			// Note: we do not care ending line break,
			// it is needed for adding second line,
			// so just clean it once at the end when set to value.
			p.comment.Write(line)
			continue
		}

		// Section
		if line[0] == '[' {
			// Read to the next ']' (TODO: support quoted strings)
			// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
			closeIdx := bytes.LastIndex(line, []byte("]"))
			if closeIdx == -1 {
				return fmt.Errorf("unclosed section: %s", line)
			}

			name := string(line[1:closeIdx])
			section, err = f.NewSection(name)
			if err != nil {
				return err
			}

			comment, has := cleanComment(line[closeIdx+1:])
			if has {
				p.comment.Write(comment)
			}

			section.Comment = strings.TrimSpace(p.comment.String())

			// Reset auto-counter and comments
			p.comment.Reset()
			p.count = 1

			inUnparseableSection = false
			for i := range f.options.UnparseableSections {
				if f.options.UnparseableSections[i] == name ||
					(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
					inUnparseableSection = true
					continue
				}
			}
			continue
		}

		if inUnparseableSection {
			section.isRawSection = true
			section.rawBody += string(line)
			continue
		}

		kname, offset, err := readKeyName(line)
		if err != nil {
			// Treat as boolean key when desired, and whole line is key name.
			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
				kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
				if err != nil {
					return err
				}
				key, err := section.NewBooleanKey(kname)
				if err != nil {
					return err
				}
				key.Comment = strings.TrimSpace(p.comment.String())
				p.comment.Reset()
				continue
			}
			return err
		}

		// Auto increment.
		isAutoIncr := false
		if kname == "-" {
			isAutoIncr = true
			kname = "#" + strconv.Itoa(p.count)
			p.count++
		}

		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
		if err != nil {
			return err
		}

		key, err := section.NewKey(kname, value)
		if err != nil {
			return err
		}
		key.isAutoIncrement = isAutoIncr
		key.Comment = strings.TrimSpace(p.comment.String())
		p.comment.Reset()
	}
	return nil
}
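readValue and readContinuationLines together implement backslash line continuation: a trailing '\' is stripped and the next trimmed line is spliced onto the value. A small sketch of the observable behavior (illustrative only, same assumed import path as above):

cfg, _ := ini.Load([]byte("key = line one \\\nline two"))
fmt.Println(cfg.Section("").Key("key").String()) // "line one line two"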
42
vendor/github.com/go-ini/ini/parser_test.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package ini

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func Test_BOM(t *testing.T) {
	Convey("Test handling BOM", t, func() {
		Convey("UTF-8-BOM", func() {
			cfg, err := Load("testdata/UTF-8-BOM.ini")
			So(err, ShouldBeNil)
			So(cfg, ShouldNotBeNil)

			So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io")
		})

		Convey("UTF-16-LE-BOM", func() {
			cfg, err := Load("testdata/UTF-16-LE-BOM.ini")
			So(err, ShouldBeNil)
			So(cfg, ShouldNotBeNil)
		})

		Convey("UTF-16-BE-BOM", func() {
		})
	})
}
248
vendor/github.com/go-ini/ini/section.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Section represents a config section.
|
||||
type Section struct {
|
||||
f *File
|
||||
Comment string
|
||||
name string
|
||||
keys map[string]*Key
|
||||
keyList []string
|
||||
keysHash map[string]string
|
||||
|
||||
isRawSection bool
|
||||
rawBody string
|
||||
}
|
||||
|
||||
func newSection(f *File, name string) *Section {
|
||||
return &Section{
|
||||
f: f,
|
||||
name: name,
|
||||
keys: make(map[string]*Key),
|
||||
keyList: make([]string, 0, 10),
|
||||
keysHash: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns name of Section.
|
||||
func (s *Section) Name() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// Body returns rawBody of Section if the section was marked as unparseable.
|
||||
// Leading and trailing whitespace is still trimmed, following the rest of the INI format.
|
||||
func (s *Section) Body() string {
|
||||
return strings.TrimSpace(s.rawBody)
|
||||
}
|
||||
|
||||
// NewKey creates a new key to given section.
|
||||
func (s *Section) NewKey(name, val string) (*Key, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new key: empty key name")
|
||||
} else if s.f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.Lock()
|
||||
defer s.f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, s.keyList) {
|
||||
if s.f.options.AllowShadows {
|
||||
if err := s.keys[name].addShadow(val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
s.keys[name].value = val
|
||||
}
|
||||
return s.keys[name], nil
|
||||
}
|
||||
|
||||
s.keyList = append(s.keyList, name)
|
||||
s.keys[name] = newKey(s, name, val)
|
||||
s.keysHash[name] = val
|
||||
return s.keys[name], nil
|
||||
}
|
||||
|
||||
// NewBooleanKey creates a new boolean type key to given section.
|
||||
func (s *Section) NewBooleanKey(name string) (*Key, error) {
|
||||
key, err := s.NewKey(name, "true")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key.isBooleanType = true
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// GetKey returns key in section by given name.
|
||||
func (s *Section) GetKey(name string) (*Key, error) {
|
||||
// FIXME: change to section level lock?
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
}
|
||||
if s.f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
key := s.keys[name]
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
if key == nil {
|
||||
// Check if it is a child-section.
|
||||
sname := s.name
|
||||
for {
|
||||
if i := strings.LastIndex(sname, "."); i > -1 {
|
||||
sname = sname[:i]
|
||||
sec, err := s.f.GetSection(sname)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return sec.GetKey(name)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// HasKey returns true if section contains a key with given name.
|
||||
func (s *Section) HasKey(name string) bool {
|
||||
key, _ := s.GetKey(name)
|
||||
return key != nil
|
||||
}
|
||||
|
||||
// Haskey is a backwards-compatible name for HasKey.
|
||||
func (s *Section) Haskey(name string) bool {
|
||||
return s.HasKey(name)
|
||||
}
|
||||
|
||||
// HasValue returns true if section contains given raw value.
|
||||
func (s *Section) HasValue(value string) bool {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
defer s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
for _, k := range s.keys {
|
||||
if value == k.value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Key assumes the named key exists in the section and returns a zero-value key when it does not.
|
||||
func (s *Section) Key(name string) *Key {
|
||||
key, err := s.GetKey(name)
|
||||
if err != nil {
|
||||
// It's OK to ignore the error here: the only possible error is an empty
|
||||
// key name, and an empty name can never reach this code path.
|
||||
key, _ = s.NewKey(name, "")
|
||||
return key
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// Keys returns list of keys of section.
|
||||
func (s *Section) Keys() []*Key {
|
||||
keys := make([]*Key, len(s.keyList))
|
||||
for i := range s.keyList {
|
||||
keys[i] = s.Key(s.keyList[i])
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// ParentKeys returns list of keys of parent section.
|
||||
func (s *Section) ParentKeys() []*Key {
|
||||
var parentKeys []*Key
|
||||
sname := s.name
|
||||
for {
|
||||
if i := strings.LastIndex(sname, "."); i > -1 {
|
||||
sname = sname[:i]
|
||||
sec, err := s.f.GetSection(sname)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
parentKeys = append(parentKeys, sec.Keys()...)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return parentKeys
|
||||
}
|
||||
|
||||
// KeyStrings returns list of key names of section.
|
||||
func (s *Section) KeyStrings() []string {
|
||||
list := make([]string, len(s.keyList))
|
||||
copy(list, s.keyList)
|
||||
return list
|
||||
}
|
||||
|
||||
// KeysHash returns keys hash consisting of names and values.
|
||||
func (s *Section) KeysHash() map[string]string {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
defer s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
hash := map[string]string{}
|
||||
for key, value := range s.keysHash {
|
||||
hash[key] = value
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// DeleteKey deletes a key from section.
|
||||
func (s *Section) DeleteKey(name string) {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.Lock()
|
||||
defer s.f.lock.Unlock()
|
||||
}
|
||||
|
||||
for i, k := range s.keyList {
|
||||
if k == name {
|
||||
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
|
||||
delete(s.keys, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of current section.
|
||||
// For example, "[parent.child1]" and "[parent.child12]" are child sections
|
||||
// of section "[parent]".
|
||||
func (s *Section) ChildSections() []*Section {
|
||||
prefix := s.name + "."
|
||||
children := make([]*Section, 0, 3)
|
||||
for _, name := range s.f.sectionList {
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
children = append(children, s.f.sections[name])
|
||||
}
|
||||
}
|
||||
return children
|
||||
}
|
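A short, hedged sketch of the Section API defined above (the parent-section fallback in GetKey and child-section discovery via ChildSections); the config body is an illustrative assumption:

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[parent]\nkey = value\n\n[parent.child]\n"))
	if err != nil {
		panic(err)
	}

	// GetKey falls back to "[parent]" because "[parent.child]" does not
	// define "key" itself (see the child-section walk in GetKey above).
	k, err := cfg.Section("parent.child").GetKey("key")
	if err != nil {
		panic(err)
	}
	fmt.Println(k.String()) // value

	// ChildSections returns every section whose name extends "parent.".
	fmt.Println(len(cfg.Section("parent").ChildSections())) // 1
}
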
75
vendor/github.com/go-ini/ini/section_test.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func Test_Section(t *testing.T) {
|
||||
Convey("Test CRD sections", t, func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
Convey("Get section strings", func() {
|
||||
So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance")
|
||||
})
|
||||
|
||||
Convey("Delete a section", func() {
|
||||
cfg.DeleteSection("")
|
||||
So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION)
|
||||
})
|
||||
|
||||
Convey("Create new sections", func() {
|
||||
cfg.NewSections("test", "test2")
|
||||
_, err := cfg.GetSection("test")
|
||||
So(err, ShouldBeNil)
|
||||
_, err = cfg.GetSection("test2")
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func Test_SectionRaw(t *testing.T) {
|
||||
Convey("Test section raw string", t, func() {
|
||||
cfg, err := LoadSources(
|
||||
LoadOptions{
|
||||
Insensitive: true,
|
||||
UnparseableSections: []string{"core_lesson", "comments"},
|
||||
},
|
||||
"testdata/aicc.ini")
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
Convey("Get section strings", func() {
|
||||
So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,core,core_lesson,comments")
|
||||
})
|
||||
|
||||
Convey("Validate non-raw section", func() {
|
||||
val, err := cfg.Section("core").GetKey("lesson_status")
|
||||
So(err, ShouldBeNil)
|
||||
So(val.String(), ShouldEqual, "C")
|
||||
})
|
||||
|
||||
Convey("Validate raw section", func() {
|
||||
So(cfg.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000
|
||||
111111111111111111100000000000111000000000 – end my lesson state data`)
|
||||
})
|
||||
})
|
||||
}
|
499
vendor/github.com/go-ini/ini/struct.go
generated
vendored
Normal file
|
@ -0,0 +1,499 @@
|
|||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// NameMapper represents an ini tag name mapper.
|
||||
type NameMapper func(string) string
|
||||
|
||||
// Built-in name getters.
|
||||
var (
|
||||
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
|
||||
AllCapsUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
}
|
||||
newstr = append(newstr, unicode.ToUpper(chr))
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
// TitleUnderscore converts to format title_underscore.
|
||||
TitleUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
chr -= ('A' - 'a')
|
||||
}
|
||||
newstr = append(newstr, chr)
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
)
|
||||
|
||||
func (s *Section) parseFieldName(raw, actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
if s.f.NameMapper != nil {
|
||||
return s.f.NameMapper(raw)
|
||||
}
|
||||
return raw
|
||||
}
|
||||
|
||||
func parseDelim(actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
return ","
|
||||
}
|
||||
|
||||
var reflectTime = reflect.TypeOf(time.Now()).Kind()
|
||||
|
||||
// setSliceWithProperType sets proper values to slice based on its type.
|
||||
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||
var strs []string
|
||||
if allowShadow {
|
||||
strs = key.StringsWithShadows(delim)
|
||||
} else {
|
||||
strs = key.Strings(delim)
|
||||
}
|
||||
|
||||
numVals := len(strs)
|
||||
if numVals == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var vals interface{}
|
||||
var err error
|
||||
|
||||
sliceOf := field.Type().Elem().Kind()
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
vals = strs
|
||||
case reflect.Int:
|
||||
vals, err = key.parseInts(strs, true, false)
|
||||
case reflect.Int64:
|
||||
vals, err = key.parseInt64s(strs, true, false)
|
||||
case reflect.Uint:
|
||||
vals, err = key.parseUints(strs, true, false)
|
||||
case reflect.Uint64:
|
||||
vals, err = key.parseUint64s(strs, true, false)
|
||||
case reflect.Float64:
|
||||
vals, err = key.parseFloat64s(strs, true, false)
|
||||
case reflectTime:
|
||||
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
if isStrict {
|
||||
return err
|
||||
}
|
||||
|
||||
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
|
||||
for i := 0; i < numVals; i++ {
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
|
||||
case reflect.Int:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
|
||||
case reflect.Int64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
|
||||
case reflect.Uint:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
|
||||
case reflect.Uint64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
|
||||
case reflect.Float64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
|
||||
case reflectTime:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
|
||||
}
|
||||
}
|
||||
field.Set(slice)
|
||||
return nil
|
||||
}
|
||||
|
||||
func wrapStrictError(err error, isStrict bool) error {
|
||||
if isStrict {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setWithProperType sets proper value to field based on its type,
|
||||
// but it does not return an error on parse failure,
|
||||
// because we want to keep the default value that is already assigned to the struct.
|
||||
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if len(key.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
field.SetString(key.String())
|
||||
case reflect.Bool:
|
||||
boolVal, err := key.Bool()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetBool(boolVal)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
durationVal, err := key.Duration()
|
||||
// Skip zero value
|
||||
if err == nil && int(durationVal) > 0 {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
intVal, err := key.Int64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetInt(intVal)
|
||||
// byte is an alias for uint8, so supporting uint8 breaks support for byte
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
durationVal, err := key.Duration()
|
||||
// Skip zero value
|
||||
if err == nil && int(durationVal) > 0 {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
uintVal, err := key.Uint64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetUint(uintVal)
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
floatVal, err := key.Float64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetFloat(floatVal)
|
||||
case reflectTime:
|
||||
timeVal, err := key.Time()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.Set(reflect.ValueOf(timeVal))
|
||||
case reflect.Slice:
|
||||
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
|
||||
opts := strings.SplitN(tag, ",", 3)
|
||||
rawName = opts[0]
|
||||
if len(opts) > 1 {
|
||||
omitEmpty = opts[1] == "omitempty"
|
||||
}
|
||||
if len(opts) > 2 {
|
||||
allowShadow = opts[2] == "allowshadow"
|
||||
}
|
||||
return rawName, omitEmpty, allowShadow
|
||||
}
|
||||
|
||||
func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
rawName, _, allowShadow := parseTagOptions(tag)
|
||||
fieldName := s.parseFieldName(tpField.Name, rawName)
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
|
||||
isStruct := tpField.Type.Kind() == reflect.Struct
|
||||
if isAnonymous {
|
||||
field.Set(reflect.New(tpField.Type.Elem()))
|
||||
}
|
||||
|
||||
if isAnonymous || isStruct {
|
||||
if sec, err := s.f.GetSection(fieldName); err == nil {
|
||||
if err = sec.mapTo(field, isStrict); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if key, err := s.GetKey(fieldName); err == nil {
|
||||
delim := parseDelim(tpField.Tag.Get("delim"))
|
||||
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MapTo maps section to given struct.
|
||||
func (s *Section) MapTo(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot map to non-pointer struct")
|
||||
}
|
||||
|
||||
return s.mapTo(val, false)
|
||||
}
|
||||
|
||||
// StrictMapTo maps section to given struct in strict mode,
|
||||
// which returns all possible errors, including value parsing errors.
|
||||
func (s *Section) StrictMapTo(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot map to non-pointer struct")
|
||||
}
|
||||
|
||||
return s.mapTo(val, true)
|
||||
}
|
||||
|
||||
// MapTo maps file to given struct.
|
||||
func (f *File) MapTo(v interface{}) error {
|
||||
return f.Section("").MapTo(v)
|
||||
}
|
||||
|
||||
// StrictMapTo maps file to given struct in strict mode,
|
||||
// which returns all possible errors, including value parsing errors.
|
||||
func (f *File) StrictMapTo(v interface{}) error {
|
||||
return f.Section("").StrictMapTo(v)
|
||||
}
|
||||
|
||||
// MapToWithMapper maps data sources to given struct with name mapper.
|
||||
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||
cfg, err := Load(source, others...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.MapTo(v)
|
||||
}
|
||||
|
||||
// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
|
||||
// which returns all possible errors, including value parsing errors.
|
||||
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||
cfg, err := Load(source, others...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.StrictMapTo(v)
|
||||
}
|
||||
|
||||
// MapTo maps data sources to given struct.
|
||||
func MapTo(v, source interface{}, others ...interface{}) error {
|
||||
return MapToWithMapper(v, nil, source, others...)
|
||||
}
|
||||
|
||||
// StrictMapTo maps data sources to given struct in strict mode,
|
||||
// which returns all possible errors, including value parsing errors.
|
||||
func StrictMapTo(v, source interface{}, others ...interface{}) error {
|
||||
return StrictMapToWithMapper(v, nil, source, others...)
|
||||
}
|
||||
|
||||
// reflectSliceWithProperType does the opposite of setSliceWithProperType.
|
||||
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
|
||||
slice := field.Slice(0, field.Len())
|
||||
if field.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
sliceOf := field.Type().Elem().Kind()
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
buf.WriteString(slice.Index(i).String())
|
||||
case reflect.Int, reflect.Int64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
|
||||
case reflect.Float64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
|
||||
case reflectTime:
|
||||
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
buf.WriteString(delim)
|
||||
}
|
||||
key.SetValue(buf.String()[:buf.Len()-1])
|
||||
return nil
|
||||
}
|
||||
|
||||
// reflectWithProperType does the opposite of setWithProperType.
|
||||
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
key.SetValue(field.String())
|
||||
case reflect.Bool:
|
||||
key.SetValue(fmt.Sprint(field.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
key.SetValue(fmt.Sprint(field.Int()))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
key.SetValue(fmt.Sprint(field.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
key.SetValue(fmt.Sprint(field.Float()))
|
||||
case reflectTime:
|
||||
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
|
||||
case reflect.Slice:
|
||||
return reflectSliceWithProperType(key, field, delim)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CR: copied from encoding/json/encode.go, modified to support time.Time.
|
||||
// TODO: add more test coverage.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflectTime:
|
||||
return v.Interface().(time.Time).IsZero()
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Section) reflectFrom(val reflect.Value) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
opts := strings.SplitN(tag, ",", 2)
|
||||
if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := s.parseFieldName(tpField.Name, opts[0])
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
|
||||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
|
||||
// Note: The only possible error here is that the section does not exist.
|
||||
sec, err := s.f.GetSection(fieldName)
|
||||
if err != nil {
|
||||
// Note: fieldName can never be empty here, so the error is ignored.
|
||||
sec, _ = s.f.NewSection(fieldName)
|
||||
}
|
||||
if err = sec.reflectFrom(field); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Note: Same reasoning as for sections above.
|
||||
key, err := s.GetKey(fieldName)
|
||||
if err != nil {
|
||||
key, _ = s.NewKey(fieldName, "")
|
||||
}
|
||||
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReflectFrom reflects section from given struct.
|
||||
func (s *Section) ReflectFrom(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot reflect from non-pointer struct")
|
||||
}
|
||||
|
||||
return s.reflectFrom(val)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects file from given struct.
|
||||
func (f *File) ReflectFrom(v interface{}) error {
|
||||
return f.Section("").ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
|
||||
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects data sources from given struct.
|
||||
func ReflectFrom(cfg *File, v interface{}) error {
|
||||
return ReflectFromWithMapper(cfg, v, nil)
|
||||
}
|
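To round off this file, a hedged round-trip sketch of MapTo and ReflectFrom as defined above; the Config struct and its values are illustrative assumptions:

package main

import (
	"fmt"
	"os"

	"github.com/go-ini/ini"
)

type Config struct {
	Name string `ini:"NAME"`
	Age  int
}

func main() {
	// MapTo: INI -> struct. Field names match directly or via the "ini" tag.
	var c Config
	if err := ini.MapTo(&c, []byte("NAME = restic\nAge = 5")); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Age) // restic 5

	// ReflectFrom: struct -> INI, the inverse direction.
	cfg := ini.Empty()
	if err := ini.ReflectFrom(cfg, &c); err != nil {
		panic(err)
	}
	cfg.WriteTo(os.Stdout) // NAME = restic, Age = 5
}
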
352
vendor/github.com/go-ini/ini/struct_test.go
generated
vendored
Normal file
|
@ -0,0 +1,352 @@
|
|||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
type testNested struct {
|
||||
Cities []string `delim:"|"`
|
||||
Visits []time.Time
|
||||
Years []int
|
||||
Numbers []int64
|
||||
Ages []uint
|
||||
Populations []uint64
|
||||
Coordinates []float64
|
||||
Note string
|
||||
Unused int `ini:"-"`
|
||||
}
|
||||
|
||||
type testEmbeded struct {
|
||||
GPA float64
|
||||
}
|
||||
|
||||
type testStruct struct {
|
||||
Name string `ini:"NAME"`
|
||||
Age int
|
||||
Male bool
|
||||
Money float64
|
||||
Born time.Time
|
||||
Time time.Duration `ini:"Duration"`
|
||||
Others testNested
|
||||
*testEmbeded `ini:"grade"`
|
||||
Unused int `ini:"-"`
|
||||
Unsigned uint
|
||||
Omitted bool `ini:"omitthis,omitempty"`
|
||||
Shadows []string `ini:",,allowshadow"`
|
||||
ShadowInts []int `ini:"Shadows,,allowshadow"`
|
||||
}
|
||||
|
||||
const _CONF_DATA_STRUCT = `
|
||||
NAME = Unknwon
|
||||
Age = 21
|
||||
Male = true
|
||||
Money = 1.25
|
||||
Born = 1993-10-07T20:17:05Z
|
||||
Duration = 2h45m
|
||||
Unsigned = 3
|
||||
omitthis = true
|
||||
Shadows = 1, 2
|
||||
Shadows = 3, 4
|
||||
|
||||
[Others]
|
||||
Cities = HangZhou|Boston
|
||||
Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z
|
||||
Years = 1993,1994
|
||||
Numbers = 10010,10086
|
||||
Ages = 18,19
|
||||
Populations = 12345678,98765432
|
||||
Coordinates = 192.168,10.11
|
||||
Note = Hello world!
|
||||
|
||||
[grade]
|
||||
GPA = 2.8
|
||||
|
||||
[foo.bar]
|
||||
Here = there
|
||||
When = then
|
||||
`
|
||||
|
||||
type unsupport struct {
|
||||
Byte byte
|
||||
}
|
||||
|
||||
type unsupport2 struct {
|
||||
Others struct {
|
||||
Cities byte
|
||||
}
|
||||
}
|
||||
|
||||
type unsupport3 struct {
|
||||
Cities byte
|
||||
}
|
||||
|
||||
type unsupport4 struct {
|
||||
*unsupport3 `ini:"Others"`
|
||||
}
|
||||
|
||||
type defaultValue struct {
|
||||
Name string
|
||||
Age int
|
||||
Male bool
|
||||
Money float64
|
||||
Born time.Time
|
||||
Cities []string
|
||||
}
|
||||
|
||||
type fooBar struct {
|
||||
Here, When string
|
||||
}
|
||||
|
||||
const _INVALID_DATA_CONF_STRUCT = `
|
||||
Name =
|
||||
Age = age
|
||||
Male = 123
|
||||
Money = money
|
||||
Born = nil
|
||||
Cities =
|
||||
`
|
||||
|
||||
func Test_Struct(t *testing.T) {
|
||||
Convey("Map to struct", t, func() {
|
||||
Convey("Map file to struct", func() {
|
||||
ts := new(testStruct)
|
||||
So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil)
|
||||
|
||||
So(ts.Name, ShouldEqual, "Unknwon")
|
||||
So(ts.Age, ShouldEqual, 21)
|
||||
So(ts.Male, ShouldBeTrue)
|
||||
So(ts.Money, ShouldEqual, 1.25)
|
||||
So(ts.Unsigned, ShouldEqual, 3)
|
||||
|
||||
t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
|
||||
So(err, ShouldBeNil)
|
||||
So(ts.Born.String(), ShouldEqual, t.String())
|
||||
|
||||
dur, err := time.ParseDuration("2h45m")
|
||||
So(err, ShouldBeNil)
|
||||
So(ts.Time.Seconds(), ShouldEqual, dur.Seconds())
|
||||
|
||||
So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston")
|
||||
So(ts.Others.Visits[0].String(), ShouldEqual, t.String())
|
||||
So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]")
|
||||
So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]")
|
||||
So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]")
|
||||
So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]")
|
||||
So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]")
|
||||
So(ts.Others.Note, ShouldEqual, "Hello world!")
|
||||
So(ts.testEmbeded.GPA, ShouldEqual, 2.8)
|
||||
})
|
||||
|
||||
Convey("Map section to struct", func() {
|
||||
foobar := new(fooBar)
|
||||
f, err := Load([]byte(_CONF_DATA_STRUCT))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil)
|
||||
So(foobar.Here, ShouldEqual, "there")
|
||||
So(foobar.When, ShouldEqual, "then")
|
||||
})
|
||||
|
||||
Convey("Map to non-pointer struct", func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA_STRUCT))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
So(cfg.MapTo(testStruct{}), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Map to unsupported type", func() {
|
||||
cfg, err := Load([]byte(_CONF_DATA_STRUCT))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
cfg.NameMapper = func(raw string) string {
|
||||
if raw == "Byte" {
|
||||
return "NAME"
|
||||
}
|
||||
return raw
|
||||
}
|
||||
So(cfg.MapTo(&unsupport{}), ShouldNotBeNil)
|
||||
So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil)
|
||||
So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Map to omitempty field", func() {
|
||||
ts := new(testStruct)
|
||||
So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil)
|
||||
|
||||
So(ts.Omitted, ShouldEqual, true)
|
||||
})
|
||||
|
||||
Convey("Map with shadows", func() {
|
||||
cfg, err := LoadSources(LoadOptions{AllowShadows: true}, []byte(_CONF_DATA_STRUCT))
|
||||
So(err, ShouldBeNil)
|
||||
ts := new(testStruct)
|
||||
So(cfg.MapTo(ts), ShouldBeNil)
|
||||
|
||||
So(strings.Join(ts.Shadows, " "), ShouldEqual, "1 2 3 4")
|
||||
So(fmt.Sprintf("%v", ts.ShadowInts), ShouldEqual, "[1 2 3 4]")
|
||||
})
|
||||
|
||||
Convey("Map from invalid data source", func() {
|
||||
So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Map to wrong types and gain default values", func() {
|
||||
cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
|
||||
So(err, ShouldBeNil)
|
||||
dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}}
|
||||
So(cfg.MapTo(dv), ShouldBeNil)
|
||||
So(dv.Name, ShouldEqual, "Joe")
|
||||
So(dv.Age, ShouldEqual, 10)
|
||||
So(dv.Male, ShouldBeTrue)
|
||||
So(dv.Money, ShouldEqual, 1.25)
|
||||
So(dv.Born.String(), ShouldEqual, t.String())
|
||||
So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston")
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Map to struct in strict mode", t, func() {
|
||||
cfg, err := Load([]byte(`
|
||||
name=bruce
|
||||
age=a30`))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
type Strict struct {
|
||||
Name string `ini:"name"`
|
||||
Age int `ini:"age"`
|
||||
}
|
||||
s := new(Strict)
|
||||
|
||||
So(cfg.Section("").StrictMapTo(s), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Reflect from struct", t, func() {
|
||||
type Embeded struct {
|
||||
Dates []time.Time `delim:"|"`
|
||||
Places []string
|
||||
Years []int
|
||||
Numbers []int64
|
||||
Ages []uint
|
||||
Populations []uint64
|
||||
Coordinates []float64
|
||||
None []int
|
||||
}
|
||||
type Author struct {
|
||||
Name string `ini:"NAME"`
|
||||
Male bool
|
||||
Age int
|
||||
Height uint
|
||||
GPA float64
|
||||
Date time.Time
|
||||
NeverMind string `ini:"-"`
|
||||
*Embeded `ini:"infos"`
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
|
||||
So(err, ShouldBeNil)
|
||||
a := &Author{"Unknwon", true, 21, 100, 2.8, t, "",
|
||||
&Embeded{
|
||||
[]time.Time{t, t},
|
||||
[]string{"HangZhou", "Boston"},
|
||||
[]int{1993, 1994},
|
||||
[]int64{10010, 10086},
|
||||
[]uint{18, 19},
|
||||
[]uint64{12345678, 98765432},
|
||||
[]float64{192.168, 10.11},
|
||||
[]int{},
|
||||
}}
|
||||
cfg := Empty()
|
||||
So(ReflectFrom(cfg, a), ShouldBeNil)
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = cfg.WriteTo(&buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(buf.String(), ShouldEqual, `NAME = Unknwon
|
||||
Male = true
|
||||
Age = 21
|
||||
Height = 100
|
||||
GPA = 2.8
|
||||
Date = 1993-10-07T20:17:05Z
|
||||
|
||||
[infos]
|
||||
Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z
|
||||
Places = HangZhou,Boston
|
||||
Years = 1993,1994
|
||||
Numbers = 10010,10086
|
||||
Ages = 18,19
|
||||
Populations = 12345678,98765432
|
||||
Coordinates = 192.168,10.11
|
||||
None =
|
||||
|
||||
`)
|
||||
|
||||
Convey("Reflect from non-point struct", func() {
|
||||
So(ReflectFrom(cfg, Author{}), ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Reflect from struct with omitempty", func() {
|
||||
cfg := Empty()
|
||||
type SpecialStruct struct {
|
||||
FirstName string `ini:"first_name"`
|
||||
LastName string `ini:"last_name"`
|
||||
JustOmitMe string `ini:"omitempty"`
|
||||
LastLogin time.Time `ini:"last_login,omitempty"`
|
||||
LastLogin2 time.Time `ini:",omitempty"`
|
||||
NotEmpty int `ini:"omitempty"`
|
||||
}
|
||||
|
||||
So(ReflectFrom(cfg, &SpecialStruct{FirstName: "John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil)
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = cfg.WriteTo(&buf)
|
||||
So(buf.String(), ShouldEqual, `first_name = John
|
||||
last_name = Doe
|
||||
omitempty = 9
|
||||
|
||||
`)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type testMapper struct {
|
||||
PackageName string
|
||||
}
|
||||
|
||||
func Test_NameGetter(t *testing.T) {
|
||||
Convey("Test name mappers", t, func() {
|
||||
So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil)
|
||||
|
||||
cfg, err := Load([]byte("PACKAGE_NAME=ini"))
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg, ShouldNotBeNil)
|
||||
|
||||
cfg.NameMapper = AllCapsUnderscore
|
||||
tg := new(testMapper)
|
||||
So(cfg.MapTo(tg), ShouldBeNil)
|
||||
So(tg.PackageName, ShouldEqual, "ini")
|
||||
})
|
||||
}
|
BIN
vendor/github.com/go-ini/ini/testdata/UTF-16-BE-BOM.ini
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/github.com/go-ini/ini/testdata/UTF-16-LE-BOM.ini
generated
vendored
Normal file
Binary file not shown.
2
vendor/github.com/go-ini/ini/testdata/UTF-8-BOM.ini
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
[author]
|
||||
E-MAIL = u@gogs.io
|
11
vendor/github.com/go-ini/ini/testdata/aicc.ini
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
[Core]
|
||||
Lesson_Location = 87
|
||||
Lesson_Status = C
|
||||
Score = 3
|
||||
Time = 00:02:30
|
||||
|
||||
[CORE_LESSON]
|
||||
my lesson state data – 1111111111111111111000000000000000001110000
|
||||
111111111111111111100000000000111000000000 – end my lesson state data
|
||||
[COMMENTS]
|
||||
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
|
2
vendor/github.com/go-ini/ini/testdata/conf.ini
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
[author]
|
||||
E-MAIL = u@gogs.io
|
13
vendor/github.com/inconshreveable/mousetrap/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
Copyright 2014 Alan Shreve
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
# mousetrap
|
||||
|
||||
mousetrap is a tiny library that answers a single question.
|
||||
|
||||
On a Windows machine, was the process invoked by someone double-clicking on
|
||||
the executable file while browsing in explorer?
|
||||
|
||||
### Motivation
|
||||
|
||||
Windows developers unfamiliar with command line tools will often "double-click"
|
||||
the executable for a tool. Because most CLI tools print the help and then exit
|
||||
when invoked without arguments, this is often very frustrating for those users.
|
||||
|
||||
mousetrap provides a way to detect these invocations so that you can provide
|
||||
more helpful behavior and instructions on how to run the CLI tool. To see what
|
||||
this looks like, both from an organizational and a technical perspective, see
|
||||
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
|
||||
|
||||
### The interface
|
||||
|
||||
The library exposes a single interface:
|
||||
|
||||
func StartedByExplorer() (bool)
|
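A minimal usage sketch, assuming a hypothetical CLI tool; the message text and the five-second pause are illustrative, not part of the library:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		fmt.Println("Don't double-click this program; run it from a console, e.g.:")
		fmt.Println("  cmd.exe /K yourtool.exe --help")
		time.Sleep(5 * time.Second) // keep the window open long enough to read
		os.Exit(1)
	}
	// ... normal CLI behavior ...
}
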
15
vendor/github.com/inconshreveable/mousetrap/trap_others.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
// +build !windows
|
||||
|
||||
package mousetrap
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user
|
||||
// double-clicking on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It can only tell you
|
||||
// whether it was launched from explorer.exe
|
||||
//
|
||||
// On non-Windows platforms, it always returns false.
|
||||
func StartedByExplorer() bool {
|
||||
return false
|
||||
}
|
98
vendor/github.com/inconshreveable/mousetrap/trap_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,98 @@
|
|||
// +build windows
|
||||
// +build !go1.4
|
||||
|
||||
package mousetrap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// defined by the Win32 API
|
||||
th32cs_snapprocess uintptr = 0x2
|
||||
)
|
||||
|
||||
var (
|
||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
||||
CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
|
||||
Process32First = kernel.MustFindProc("Process32FirstW")
|
||||
Process32Next = kernel.MustFindProc("Process32NextW")
|
||||
)
|
||||
|
||||
// ProcessEntry32 structure defined by the Win32 API
|
||||
type processEntry32 struct {
|
||||
dwSize uint32
|
||||
cntUsage uint32
|
||||
th32ProcessID uint32
|
||||
th32DefaultHeapID int
|
||||
th32ModuleID uint32
|
||||
cntThreads uint32
|
||||
th32ParentProcessID uint32
|
||||
pcPriClassBase int32
|
||||
dwFlags uint32
|
||||
szExeFile [syscall.MAX_PATH]uint16
|
||||
}
|
||||
|
||||
func getProcessEntry(pid int) (pe *processEntry32, err error) {
|
||||
snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
|
||||
if snapshot == uintptr(syscall.InvalidHandle) {
|
||||
err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
|
||||
return
|
||||
}
|
||||
defer syscall.CloseHandle(syscall.Handle(snapshot))
|
||||
|
||||
var processEntry processEntry32
|
||||
processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
|
||||
ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
|
||||
if ok == 0 {
|
||||
err = fmt.Errorf("Process32First: %v", e1)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
if processEntry.th32ProcessID == uint32(pid) {
|
||||
pe = &processEntry
|
||||
return
|
||||
}
|
||||
|
||||
ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
|
||||
if ok == 0 {
|
||||
err = fmt.Errorf("Process32Next: %v", e1)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getppid() (pid int, err error) {
|
||||
pe, err := getProcessEntry(os.Getpid())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
pid = int(pe.th32ParentProcessID)
|
||||
return
|
||||
}
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user double-clicking
|
||||
// on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It can only tell you
|
||||
// whether it was launched from explorer.exe
|
||||
func StartedByExplorer() bool {
|
||||
ppid, err := getppid()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
pe, err := getProcessEntry(ppid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
name := syscall.UTF16ToString(pe.szExeFile[:])
|
||||
return name == "explorer.exe"
|
||||
}
|
46
vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
// +build windows
|
||||
// +build go1.4
|
||||
|
||||
package mousetrap
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
|
||||
snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CloseHandle(snapshot)
|
||||
var procEntry syscall.ProcessEntry32
|
||||
procEntry.Size = uint32(unsafe.Sizeof(procEntry))
|
||||
if err = syscall.Process32First(snapshot, &procEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for {
|
||||
if procEntry.ProcessID == uint32(pid) {
|
||||
return &procEntry, nil
|
||||
}
|
||||
err = syscall.Process32Next(snapshot, &procEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user double-clicking
|
||||
// on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It can only tell you
|
||||
// whether it was launched from explorer.exe
|
||||
func StartedByExplorer() bool {
|
||||
pe, err := getProcessEntry(os.Getppid())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
|
||||
}
|
27
vendor/github.com/kr/fs/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
3
vendor/github.com/kr/fs/Readme
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
Filesystem Package
|
||||
|
||||
http://godoc.org/github.com/kr/fs
|
19
vendor/github.com/kr/fs/example_test.go
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
package fs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kr/fs"
|
||||
)
|
||||
|
||||
func ExampleWalker() {
|
||||
walker := fs.Walk("/usr/lib")
|
||||
for walker.Step() {
|
||||
if err := walker.Err(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
fmt.Println(walker.Path())
|
||||
}
|
||||
}
|
36
vendor/github.com/kr/fs/filesystem.go
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// FileSystem defines the methods of an abstract filesystem.
|
||||
type FileSystem interface {
|
||||
|
||||
// ReadDir reads the directory named by dirname and returns a
|
||||
// list of directory entries.
|
||||
ReadDir(dirname string) ([]os.FileInfo, error)
|
||||
|
||||
// Lstat returns a FileInfo describing the named file. If the file is a
|
||||
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
|
||||
// makes no attempt to follow the link.
|
||||
Lstat(name string) (os.FileInfo, error)
|
||||
|
||||
// Join joins any number of path elements into a single path, adding a
|
||||
// separator if necessary. The result is Cleaned; in particular, all
|
||||
// empty strings are ignored.
|
||||
//
|
||||
// The separator is FileSystem specific.
|
||||
Join(elem ...string) string
|
||||
}
|
||||
|
||||
// fs represents a FileSystem provided by the os package.
|
||||
type fs struct{}
|
||||
|
||||
func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) }
|
||||
|
||||
func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
|
||||
|
||||
func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) }
|
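As a hedged illustration of the FileSystem abstraction, here is a hypothetical implementation that resolves every path beneath a fixed root before delegating to the os package; WalkFS, which accepts it, is defined in walk.go below:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/kr/fs"
)

// prefixFS is a hypothetical FileSystem that resolves every path relative
// to a fixed root before delegating to the os package.
type prefixFS struct{ root string }

func (p prefixFS) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(filepath.Join(p.root, dirname))
}

func (p prefixFS) Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(filepath.Join(p.root, name))
}

func (p prefixFS) Join(elem ...string) string { return filepath.Join(elem...) }

func main() {
	walker := fs.WalkFS(".", prefixFS{root: "/usr/lib"})
	for walker.Step() {
		if err := walker.Err(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		fmt.Println(walker.Path())
	}
}
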
95
vendor/github.com/kr/fs/walk.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
// Package fs provides filesystem-related functions.
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Walker provides a convenient interface for iterating over the
|
||||
// descendants of a filesystem path.
|
||||
// Successive calls to the Step method will step through each
|
||||
// file or directory in the tree, including the root. The files
|
||||
// are walked in lexical order, which makes the output deterministic
|
||||
// but means that for very large directories Walker can be inefficient.
|
||||
// Walker does not follow symbolic links.
|
||||
type Walker struct {
|
||||
fs FileSystem
|
||||
cur item
|
||||
stack []item
|
||||
descend bool
|
||||
}
|
||||
|
||||
type item struct {
|
||||
path string
|
||||
info os.FileInfo
|
||||
err error
|
||||
}
|
||||
|
||||
// Walk returns a new Walker rooted at root.
|
||||
func Walk(root string) *Walker {
|
||||
return WalkFS(root, new(fs))
|
||||
}
|
||||
|
||||
// WalkFS returns a new Walker rooted at root on the FileSystem fs.
|
||||
func WalkFS(root string, fs FileSystem) *Walker {
|
||||
info, err := fs.Lstat(root)
|
||||
return &Walker{
|
||||
fs: fs,
|
||||
stack: []item{{root, info, err}},
|
||||
}
|
||||
}
|
||||
|
||||
// Step advances the Walker to the next file or directory,
|
||||
// which will then be available through the Path, Stat,
|
||||
// and Err methods.
|
||||
// It returns false when the walk stops at the end of the tree.
|
||||
func (w *Walker) Step() bool {
|
||||
if w.descend && w.cur.err == nil && w.cur.info.IsDir() {
|
||||
list, err := w.fs.ReadDir(w.cur.path)
|
||||
if err != nil {
|
||||
w.cur.err = err
|
||||
w.stack = append(w.stack, w.cur)
|
||||
} else {
|
||||
for i := len(list) - 1; i >= 0; i-- {
|
||||
path := w.fs.Join(w.cur.path, list[i].Name())
|
||||
w.stack = append(w.stack, item{path, list[i], nil})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(w.stack) == 0 {
|
||||
return false
|
||||
}
|
||||
i := len(w.stack) - 1
|
||||
w.cur = w.stack[i]
|
||||
w.stack = w.stack[:i]
|
||||
w.descend = true
|
||||
return true
|
||||
}
|
||||
|
||||
// Path returns the path to the most recent file or directory
|
||||
// visited by a call to Step. It contains the argument to Walk
|
||||
// as a prefix; that is, if Walk is called with "dir", which is
|
||||
// a directory containing the file "a", Path will return "dir/a".
|
||||
func (w *Walker) Path() string {
|
||||
return w.cur.path
|
||||
}
|
||||
|
||||
// Stat returns info for the most recent file or directory
|
||||
// visited by a call to Step.
|
||||
func (w *Walker) Stat() os.FileInfo {
|
||||
return w.cur.info
|
||||
}
|
||||
|
||||
// Err returns the error, if any, for the most recent attempt
|
||||
// by Step to visit a file or directory. If a directory has
|
||||
// an error, w will not descend into that directory.
|
||||
func (w *Walker) Err() error {
|
||||
return w.cur.err
|
||||
}
|
||||
|
||||
// SkipDir causes the currently visited directory to be skipped.
|
||||
// If w is not on a directory, SkipDir has no effect.
|
||||
func (w *Walker) SkipDir() {
|
||||
w.descend = false
|
||||
}
|
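A short, hedged sketch combining Step, Err, and SkipDir from above; the walk root and the skipped directory name are illustrative assumptions:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/kr/fs"
)

func main() {
	walker := fs.Walk("/usr/lib")
	for walker.Step() {
		if err := walker.Err(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		// Skip VCS metadata instead of descending into it.
		if walker.Stat().IsDir() && filepath.Base(walker.Path()) == ".git" {
			walker.SkipDir()
			continue
		}
		fmt.Println(walker.Path())
	}
}
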
209
vendor/github.com/kr/fs/walk_test.go
generated
vendored
Normal file
|
@ -0,0 +1,209 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs_test

import (
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/kr/fs"
)

type PathTest struct {
	path, result string
}

type Node struct {
	name    string
	entries []*Node // nil if the entry is a file
	mark    int
}

var tree = &Node{
	"testdata",
	[]*Node{
		{"a", nil, 0},
		{"b", []*Node{}, 0},
		{"c", nil, 0},
		{
			"d",
			[]*Node{
				{"x", nil, 0},
				{"y", []*Node{}, 0},
				{
					"z",
					[]*Node{
						{"u", nil, 0},
						{"v", nil, 0},
					},
					0,
				},
			},
			0,
		},
	},
	0,
}

func walkTree(n *Node, path string, f func(path string, n *Node)) {
	f(path, n)
	for _, e := range n.entries {
		walkTree(e, filepath.Join(path, e.name), f)
	}
}

func makeTree(t *testing.T) {
	walkTree(tree, tree.name, func(path string, n *Node) {
		if n.entries == nil {
			fd, err := os.Create(path)
			if err != nil {
				t.Errorf("makeTree: %v", err)
				return
			}
			fd.Close()
		} else {
			os.Mkdir(path, 0770)
		}
	})
}

func markTree(n *Node) { walkTree(n, "", func(path string, n *Node) { n.mark++ }) }

func checkMarks(t *testing.T, report bool) {
	walkTree(tree, tree.name, func(path string, n *Node) {
		if n.mark != 1 && report {
			t.Errorf("node %s mark = %d; expected 1", path, n.mark)
		}
		n.mark = 0
	})
}

// Assumes that each node name is unique. Good enough for a test.
// If clear is true, any incoming error is cleared before return. The errors
// are always accumulated, though.
func mark(path string, info os.FileInfo, err error, errors *[]error, clear bool) error {
	if err != nil {
		*errors = append(*errors, err)
		if clear {
			return nil
		}
		return err
	}
	name := info.Name()
	walkTree(tree, tree.name, func(path string, n *Node) {
		if n.name == name {
			n.mark++
		}
	})
	return nil
}

func TestWalk(t *testing.T) {
	makeTree(t)
	errors := make([]error, 0, 10)
	clear := true
	markFn := func(walker *fs.Walker) (err error) {
		for walker.Step() {
			err = mark(walker.Path(), walker.Stat(), walker.Err(), &errors, clear)
			if err != nil {
				break
			}
		}
		return err
	}
	// Expect no errors.
	err := markFn(fs.Walk(tree.name))
	if err != nil {
		t.Fatalf("no error expected, found: %s", err)
	}
	if len(errors) != 0 {
		t.Fatalf("unexpected errors: %s", errors)
	}
	checkMarks(t, true)
	errors = errors[0:0]

	// Test permission errors. Only possible if we're not root
	// and only on some file systems (AFS, FAT). To avoid errors during
	// all.bash on those file systems, skip during go test -short.
	if os.Getuid() > 0 && !testing.Short() {
		// introduce 2 errors: chmod top-level directories to 0
		os.Chmod(filepath.Join(tree.name, tree.entries[1].name), 0)
		os.Chmod(filepath.Join(tree.name, tree.entries[3].name), 0)

		// 3) capture errors, expect two.
		// mark respective subtrees manually
		markTree(tree.entries[1])
		markTree(tree.entries[3])
		// correct double-marking of directory itself
		tree.entries[1].mark--
		tree.entries[3].mark--
		err := markFn(fs.Walk(tree.name))
		if err != nil {
			t.Fatalf("expected no error return from Walk, got %s", err)
		}
		if len(errors) != 2 {
			t.Errorf("expected 2 errors, got %d: %s", len(errors), errors)
		}
		// the inaccessible subtrees were marked manually
		checkMarks(t, true)
		errors = errors[0:0]

		// 4) capture errors, stop after first error.
		// mark respective subtrees manually
		markTree(tree.entries[1])
		markTree(tree.entries[3])
		// correct double-marking of directory itself
		tree.entries[1].mark--
		tree.entries[3].mark--
		clear = false // error will stop processing
		err = markFn(fs.Walk(tree.name))
		if err == nil {
			t.Fatalf("expected error return from Walk")
		}
		if len(errors) != 1 {
			t.Errorf("expected 1 error, got %d: %s", len(errors), errors)
		}
		// the inaccessible subtrees were marked manually
		checkMarks(t, false)
		errors = errors[0:0]

		// restore permissions
		os.Chmod(filepath.Join(tree.name, tree.entries[1].name), 0770)
		os.Chmod(filepath.Join(tree.name, tree.entries[3].name), 0770)
	}

	// cleanup
	if err := os.RemoveAll(tree.name); err != nil {
		t.Errorf("removeTree: %v", err)
	}
}

func TestBug3486(t *testing.T) { // http://code.google.com/p/go/issues/detail?id=3486
	root, err := filepath.EvalSymlinks(runtime.GOROOT())
	if err != nil {
		t.Fatal(err)
	}
	lib := filepath.Join(root, "lib")
	src := filepath.Join(root, "src")
	seenSrc := false
	walker := fs.Walk(root)
	for walker.Step() {
		if walker.Err() != nil {
			t.Fatal(walker.Err())
		}

		switch walker.Path() {
		case lib:
			walker.SkipDir()
		case src:
			seenSrc = true
		}
	}
	if !seenSrc {
		t.Fatalf("%q not seen", src)
	}
}
26
vendor/github.com/kurin/blazer/.gitignore
generated
vendored
Normal file

@ -0,0 +1,26 @@
bonfire

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
10
vendor/github.com/kurin/blazer/.travis.yml
generated
vendored
Normal file

@ -0,0 +1,10 @@
language: go

go:
- tip

branches:
  only:
  - master

script: B2_LOG_LEVEL=2 go test -v ./base ./b2
27
vendor/github.com/kurin/blazer/CONTRIBUTING.md
generated
vendored
Normal file

@ -0,0 +1,27 @@
Want to contribute? Great! First, read this page (including the small print at the end).

### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.

### Code reviews
All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.

### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).
13
vendor/github.com/kurin/blazer/LICENSE
generated
vendored
Normal file

@ -0,0 +1,13 @@
Copyright 2016, Google

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
139
vendor/github.com/kurin/blazer/README.md
generated
vendored
Normal file

@ -0,0 +1,139 @@
Blazer
====

[![GoDoc](https://godoc.org/github.com/kurin/blazer/b2?status.svg)](https://godoc.org/github.com/kurin/blazer/b2)
[![Build Status](https://travis-ci.org/kurin/blazer.svg)](https://travis-ci.org/kurin/blazer)

Blazer is a Go client library for Backblaze's B2 object storage service.
It is designed for simple integration with existing applications that may
already be using S3 and Google Cloud Storage, by exporting only a few standard
Go types.

It implements and satisfies the [B2 integration
checklist](https://www.backblaze.com/b2/docs/integration_checklist.html),
automatically handling error recovery, reauthentication, and other low-level
aspects, making it suitable for uploading very large files, or for uploads
that span multi-day time scales.

```go
import "github.com/kurin/blazer/b2"
```

## Examples

### Copy a file into B2

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	obj := bucket.Object(dst)
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

If the file is less than 100MB, Blazer will simply buffer the file and use the
`b2_upload_file` API to send the file to Backblaze. If the file is greater
than 100MB, Blazer will use B2's large file support to upload the file in 100MB
chunks.

### Copy a file into B2, with multiple concurrent uploads

Uploading a large file with multiple HTTP connections is simple:

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	w := bucket.Object(dst).NewWriter(ctx)
	w.ConcurrentUploads = writers
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

This will automatically split the file into `writers` chunks of 100MB uploads.
Note that 100MB is the smallest chunk size that B2 supports.
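
The part size is also tunable through the writer. A minimal sketch, assuming
the `ChunkSize` field on the writer (it is set directly in this package's
tests); against the real service, values below B2's 100MB minimum will not
work:

```go
w := bucket.Object(dst).NewWriter(ctx)
w.ConcurrentUploads = 4 // four parallel upload connections
w.ChunkSize = 1e8       // 100MB parts, the smallest size B2 accepts
```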

### Download a file from B2

Downloading is as simple as uploading:

```go
func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, dst string) error {
	r := bucket.Object(src).NewReader(ctx)
	defer r.Close()

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	r.ConcurrentDownloads = downloads
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
```

### List all objects in a bucket

```go
func printObjects(ctx context.Context, bucket *b2.Bucket) error {
	var cur *b2.Cursor
	for {
		objs, c, err := bucket.ListObjects(ctx, 1000, cur)
		if err != nil && err != io.EOF {
			return err
		}
		for _, obj := range objs {
			fmt.Println(obj)
		}
		if err == io.EOF {
			return nil
		}
		cur = c
	}
}
```

### Grant temporary auth to a file

Say you have a number of files in a private bucket, and you want to allow other
people to download some of them. You can do this by issuing a temporary
authorization token for the prefix of the files you want to share.

```go
token, err := bucket.AuthToken(ctx, "photos", time.Hour)
```

If successful, `token` is then an authorization token valid for one hour, which
can be set in HTTP GET requests.

The hostname to use when downloading files via HTTP is account-specific and can
be found via the BaseURL method:

```go
base := bucket.BaseURL()
```
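
Putting the token and the base URL together, a shareable download request can
be assembled by hand. A minimal sketch: the object name is a placeholder, the
`<base>/file/<bucket>/<object>` path mirrors this package's `Object.URL`
method, and passing the token in the `Authorization` header follows B2's
download-authorization convention:

```go
url := fmt.Sprintf("%s/file/%s/%s", base, bucket.Name(), "photos/kitten.jpg")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
	return err
}
req.Header.Set("Authorization", token) // token from bucket.AuthToken above
resp, err := http.DefaultClient.Do(req)
```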

---

This is not an official Google product.
603
vendor/github.com/kurin/blazer/b2/b2.go
generated
vendored
Normal file

@ -0,0 +1,603 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package b2 provides a high-level interface to Backblaze's B2 cloud storage
// service.
//
// It is specifically designed to abstract away the Backblaze API details by
// providing familiar Go interfaces, specifically an io.Writer for object
// storage, and an io.Reader for object download. Handling of transient
// errors, including network and authentication timeouts, is transparent.
//
// Methods that perform network requests accept a context.Context argument.
// Callers should use the context's cancellation abilities to end requests
// early, or to provide timeout or deadline guarantees.
//
// This package is in development and may make API changes.
package b2

import (
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"sync"
	"time"

	"golang.org/x/net/context"
)

// Client is a Backblaze B2 client.
type Client struct {
	backend beRootInterface

	slock    sync.Mutex
	sWriters map[string]*Writer
	sReaders map[string]*Reader
}

// NewClient creates and returns a new Client with valid B2 service account
// tokens.
func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
	c := &Client{
		backend: &beRoot{
			b2i: &b2Root{},
		},
	}
	if err := c.backend.authorizeAccount(ctx, account, key, opts...); err != nil {
		return nil, err
	}
	return c, nil
}
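
// A minimal usage sketch: authorize a client and look up an existing bucket.
// The credential variables and the bucket name here are placeholders.
//
//	client, err := b2.NewClient(ctx, accountID, applicationKey)
//	if err != nil {
//		// handle the authorization failure
//	}
//	bucket, err := client.Bucket(ctx, "my-bucket")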

type clientOptions struct {
	transport       http.RoundTripper
	failSomeUploads bool
	expireTokens    bool
	capExceeded     bool
}

// A ClientOption allows callers to adjust various per-client settings.
type ClientOption func(*clientOptions)

// Transport sets the underlying HTTP transport mechanism. If unset,
// http.DefaultTransport is used.
func Transport(rt http.RoundTripper) ClientOption {
	return func(c *clientOptions) {
		c.transport = rt
	}
}

// FailSomeUploads requests intermittent upload failures from the B2 service.
// This is mostly useful for testing.
func FailSomeUploads() ClientOption {
	return func(c *clientOptions) {
		c.failSomeUploads = true
	}
}

// ExpireSomeAuthTokens requests intermittent authentication failures from the
// B2 service.
func ExpireSomeAuthTokens() ClientOption {
	return func(c *clientOptions) {
		c.expireTokens = true
	}
}

// ForceCapExceeded requests a cap limit from the B2 service. This causes all
// uploads to be treated as if they would exceed the configured B2 capacity.
func ForceCapExceeded() ClientOption {
	return func(c *clientOptions) {
		c.capExceeded = true
	}
}

// Bucket is a reference to a B2 bucket.
type Bucket struct {
	b beBucketInterface
	r beRootInterface

	c *Client
}

// BucketType describes a bucket's visibility.
type BucketType string

const (
	UnknownType BucketType = ""
	Private                = "allPrivate"
	Public                 = "allPublic"
)

// BucketAttrs holds a bucket's metadata attributes.
type BucketAttrs struct {
	// Type lists or sets the new bucket type. If Type is UnknownType during a
	// bucket.Update, the type is not changed.
	Type BucketType

	// Info records user data, limited to ten keys. If nil during a
	// bucket.Update, the existing bucket info is not modified. A bucket's
	// metadata can be removed by updating with an empty map.
	Info map[string]string

	// LifecycleRules reports or sets bucket lifecycle rules. If nil during a
	// bucket.Update, the rules are not modified. A bucket's rules can be
	// removed by updating with an empty slice.
	LifecycleRules []LifecycleRule
}

// A LifecycleRule describes an object's life cycle, namely how many days after
// uploading an object should be hidden, and after how many days hidden an
// object should be deleted. Multiple rules may not apply to the same file or
// set of files. Be careful when using this feature; it can (is designed to)
// delete your data.
type LifecycleRule struct {
	// Prefix specifies all the files in the bucket to which this rule applies.
	Prefix string

	// DaysNewUntilHidden specifies the number of days after which a file
	// will automatically be hidden. 0 means "do not automatically hide new
	// files".
	DaysNewUntilHidden int

	// DaysHiddenUntilDeleted specifies the number of days after which a hidden
	// file is deleted. 0 means "do not automatically delete hidden files".
	DaysHiddenUntilDeleted int
}
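
// A minimal usage sketch: hide objects under a prefix one day after upload
// and delete them 30 days after being hidden. The prefix and day counts are
// placeholder values; see Bucket.Update and IsUpdateConflict below.
//
//	attrs := &b2.BucketAttrs{
//		LifecycleRules: []b2.LifecycleRule{{
//			Prefix:                 "logs/",
//			DaysNewUntilHidden:     1,
//			DaysHiddenUntilDeleted: 30,
//		}},
//	}
//	err := bucket.Update(ctx, attrs)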

type b2err struct {
	err              error
	notFoundErr      bool
	isUpdateConflict bool
}

func (e b2err) Error() string {
	return e.err.Error()
}

// IsNotExist reports whether a given error indicates that an object or bucket
// does not exist.
func IsNotExist(err error) bool {
	berr, ok := err.(b2err)
	if !ok {
		return false
	}
	return berr.notFoundErr
}

// Bucket returns a bucket if it exists.
func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) {
	buckets, err := c.backend.listBuckets(ctx)
	if err != nil {
		return nil, err
	}
	for _, bucket := range buckets {
		if bucket.name() == name {
			return &Bucket{
				b: bucket,
				r: c.backend,
				c: c,
			}, nil
		}
	}
	return nil, b2err{
		err:         fmt.Errorf("%s: bucket not found", name),
		notFoundErr: true,
	}
}

// NewBucket returns a bucket. The bucket is created with the given attributes
// if it does not already exist. If attrs is nil, it is created as a private
// bucket with no info metadata and no lifecycle rules.
func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) {
	buckets, err := c.backend.listBuckets(ctx)
	if err != nil {
		return nil, err
	}
	for _, bucket := range buckets {
		if bucket.name() == name {
			return &Bucket{
				b: bucket,
				r: c.backend,
				c: c,
			}, nil
		}
	}
	if attrs == nil {
		attrs = &BucketAttrs{Type: Private}
	}
	b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules)
	if err != nil {
		return nil, err
	}
	return &Bucket{
		b: b,
		r: c.backend,
		c: c,
	}, err
}

// ListBuckets returns all the available buckets.
func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) {
	bs, err := c.backend.listBuckets(ctx)
	if err != nil {
		return nil, err
	}
	var buckets []*Bucket
	for _, b := range bs {
		buckets = append(buckets, &Bucket{
			b: b,
			r: c.backend,
			c: c,
		})
	}
	return buckets, nil
}

// IsUpdateConflict reports whether a given error is the result of a bucket
// update conflict.
func IsUpdateConflict(err error) bool {
	e, ok := err.(b2err)
	if !ok {
		return false
	}
	return e.isUpdateConflict
}

// Update modifies the given bucket with new attributes. It is possible that
// this method could fail with an update conflict, in which case you should
// retrieve the latest bucket attributes with Attrs and try again.
func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error {
	return b.b.updateBucket(ctx, attrs)
}

// Attrs retrieves and returns the current bucket's attributes.
func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) {
	bucket, err := b.c.Bucket(ctx, b.Name())
	if err != nil {
		return nil, err
	}
	b.b = bucket.b
	return b.b.attrs(), nil
}

var bNotExist = regexp.MustCompile("Bucket.*does not exist")

// Delete removes a bucket. The bucket must be empty.
func (b *Bucket) Delete(ctx context.Context) error {
	err := b.b.deleteBucket(ctx)
	if err == nil {
		return err
	}
	// So, the B2 documentation disagrees with the implementation here, and the
	// error code is not really helpful. If the bucket doesn't exist, the error is
	// 400, not 404, and the string is "Bucket <name> does not exist". However, the
	// documentation says it will be "Bucket id <name> does not exist". In case
	// they update the implementation to match the documentation, we're just going
	// to regexp over the error message and hope it's okay.
	if bNotExist.MatchString(err.Error()) {
		return b2err{
			err:         err,
			notFoundErr: true,
		}
	}
	return err
}

// BaseURL returns the base URL to use for all files uploaded to this bucket.
func (b *Bucket) BaseURL() string {
	return b.b.baseURL()
}

// Name returns the bucket's name.
func (b *Bucket) Name() string {
	return b.b.name()
}

// Object represents a B2 object.
type Object struct {
	attrs *Attrs
	name  string
	f     beFileInterface
	b     *Bucket
}

// Attrs holds an object's metadata.
type Attrs struct {
	Name            string            // Not used on upload.
	Size            int64             // Not used on upload.
	ContentType     string            // Used on upload, default is "application/octet-stream".
	Status          ObjectState       // Not used on upload.
	UploadTimestamp time.Time         // Not used on upload.
	SHA1            string            // Not used on upload. Can be "none" for large files.
	LastModified    time.Time         // If present, and there are fewer than 10 keys in the Info field, this is saved on upload.
	Info            map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
}

// Name returns an object's name.
func (o *Object) Name() string {
	return o.name
}

// Attrs returns an object's attributes.
func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
	if err := o.ensure(ctx); err != nil {
		return nil, err
	}
	fi, err := o.f.getFileInfo(ctx)
	if err != nil {
		return nil, err
	}
	name, sha, size, ct, info, st, stamp := fi.stats()
	var state ObjectState
	switch st {
	case "upload":
		state = Uploaded
	case "start":
		state = Started
	case "hide":
		state = Hider
	case "folder":
		state = Folder
	}
	var mtime time.Time
	if v, ok := info["src_last_modified_millis"]; ok {
		ms, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, err
		}
		// Convert milliseconds since the epoch into a time.Time.
		mtime = time.Unix(ms/1e3, (ms%1e3)*1e6)
		delete(info, "src_last_modified_millis")
	}
	return &Attrs{
		Name:            name,
		Size:            size,
		ContentType:     ct,
		UploadTimestamp: stamp,
		SHA1:            sha,
		Info:            info,
		Status:          state,
		LastModified:    mtime,
	}, nil
}

// ObjectState represents the various states an object can be in.
type ObjectState int

const (
	Unknown ObjectState = iota
	// Started represents a large upload that has been started but not finished
	// or canceled.
	Started
	// Uploaded represents an object that has finished uploading and is complete.
	Uploaded
	// Hider represents an object that exists only to hide another object. It
	// cannot in itself be downloaded and, in particular, is not a hidden object.
	Hider

	// Folder is a special state given to non-objects that are returned during a
	// List*Objects call with a non-empty Delimiter.
	Folder
)

// Object returns a reference to the named object in the bucket. Hidden
// objects cannot be referenced in this manner; they can only be found by
// finding the appropriate reference in ListObjects.
func (b *Bucket) Object(name string) *Object {
	return &Object{
		name: name,
		b:    b,
	}
}

// URL returns the full URL to the given object.
func (o *Object) URL() string {
	return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name)
}

// NewWriter returns a new writer for the given object. Objects that are
// overwritten are not deleted, but are "hidden".
//
// Callers must close the writer when finished and check the error status.
func (o *Object) NewWriter(ctx context.Context) *Writer {
	ctx, cancel := context.WithCancel(ctx)
	return &Writer{
		o:      o,
		name:   o.name,
		ctx:    ctx,
		cancel: cancel,
	}
}

// NewRangeReader returns a reader for the given object, reading up to length
// bytes. If length is negative, the rest of the object is read.
func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader {
	ctx, cancel := context.WithCancel(ctx)
	return &Reader{
		ctx:    ctx,
		cancel: cancel,
		o:      o,
		name:   o.name,
		chunks: make(map[int]*rchunk),
		length: length,
		offset: offset,
	}
}
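
// A minimal usage sketch, mirroring TestReadRangeReturnsRight in this
// package's tests: read 1400 bytes starting at byte offset 200.
//
//	r := obj.NewRangeReader(ctx, 200, 1400)
//	n, err := io.Copy(ioutil.Discard, r) // n == 1400 on success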

// NewReader returns a reader for the given object.
func (o *Object) NewReader(ctx context.Context) *Reader {
	return o.NewRangeReader(ctx, 0, -1)
}

func (o *Object) ensure(ctx context.Context) error {
	if o.f == nil {
		f, err := o.b.getObject(ctx, o.name)
		if err != nil {
			return err
		}
		o.f = f.f
	}
	return nil
}

// Delete removes the given object.
func (o *Object) Delete(ctx context.Context) error {
	if err := o.ensure(ctx); err != nil {
		return err
	}
	return o.f.deleteFileVersion(ctx)
}

// Cursor is passed to ListObjects to return subsequent pages.
type Cursor struct {
	// Prefix limits the listed objects to those that begin with this string.
	Prefix string

	// Delimiter denotes the path separator. If set, object listings will be
	// truncated at this character.
	//
	// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
	// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
	// Otherwise, the listing would have returned all object names.
	//
	// Note that objects returned that end in the delimiter may not be actual
	// objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
	// both because no actual object exists and because B2 disallows object names
	// that end with "/". If you want to ensure that all objects returned by
	// ListObjects and ListCurrentObjects are actual objects, leave this unset.
	Delimiter string

	name string
	id   string
}
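
// A minimal usage sketch: with a delimiter of "/", a listing returns only
// top-level names, as described in the Delimiter doc comment above.
//
//	cur := &b2.Cursor{Delimiter: "/"}
//	objs, _, err := bucket.ListCurrentObjects(ctx, 100, cur)
//	// objs may now include non-object "folder" entries such as "foo/";
//	// err is io.EOF once the listing is exhausted.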

// ListObjects returns all objects in the bucket, including multiple versions
// of the same object. Cursor may be nil; when passed to a subsequent query,
// it will continue the listing.
//
// ListObjects will return io.EOF when there are no objects left in the bucket,
// however it may do so concurrently with the last objects.
func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
	if c == nil {
		c = &Cursor{}
	}
	fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.Prefix, c.Delimiter)
	if err != nil {
		return nil, nil, err
	}
	var next *Cursor
	if name != "" && id != "" {
		next = &Cursor{
			Prefix:    c.Prefix,
			Delimiter: c.Delimiter,
			name:      name,
			id:        id,
		}
	}
	var objects []*Object
	for _, f := range fs {
		objects = append(objects, &Object{
			name: f.name(),
			f:    f,
			b:    b,
		})
	}
	var rtnErr error
	if len(objects) == 0 || next == nil {
		rtnErr = io.EOF
	}
	return objects, next, rtnErr
}

// ListCurrentObjects is similar to ListObjects, except that it returns only
// current, unhidden objects in the bucket.
func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
	if c == nil {
		c = &Cursor{}
	}
	fs, name, err := b.b.listFileNames(ctx, count, c.name, c.Prefix, c.Delimiter)
	if err != nil {
		return nil, nil, err
	}
	var next *Cursor
	if name != "" {
		next = &Cursor{
			Prefix:    c.Prefix,
			Delimiter: c.Delimiter,
			name:      name,
		}
	}
	var objects []*Object
	for _, f := range fs {
		objects = append(objects, &Object{
			name: f.name(),
			f:    f,
			b:    b,
		})
	}
	var rtnErr error
	if len(objects) == 0 || next == nil {
		rtnErr = io.EOF
	}
	return objects, next, rtnErr
}

// Hide hides the object from name-based listing.
func (o *Object) Hide(ctx context.Context) error {
	if err := o.ensure(ctx); err != nil {
		return err
	}
	_, err := o.b.b.hideFile(ctx, o.name)
	return err
}

// Reveal unhides (if hidden) the named object. If there are multiple objects
// of a given name, it will reveal the most recent.
func (b *Bucket) Reveal(ctx context.Context, name string) error {
	cur := &Cursor{
		name: name,
	}
	objs, _, err := b.ListObjects(ctx, 1, cur)
	if err != nil && err != io.EOF {
		return err
	}
	if len(objs) < 1 || objs[0].name != name {
		return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
	}
	obj := objs[0]
	if obj.f.status() != "hide" {
		return nil
	}
	return obj.Delete(ctx)
}

func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
	fr, err := b.b.downloadFileByName(ctx, name, 0, 1)
	if err != nil {
		return nil, err
	}
	fr.Close()
	return &Object{
		name: name,
		f:    b.b.file(fr.id(), name),
		b:    b,
	}, nil
}

// AuthToken returns an authorization token that can be used to access objects
// in a private bucket. Only objects that begin with prefix can be accessed.
// The token expires after the given duration.
func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) {
	return b.b.getDownloadAuthorization(ctx, prefix, valid)
}
732
vendor/github.com/kurin/blazer/b2/b2_test.go
generated
vendored
Normal file

@ -0,0 +1,732 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/context"
)

const (
	bucketName    = "b2-tests"
	smallFileName = "Teeny Tiny"
	largeFileName = "BigBytes"
)

var gmux = &sync.Mutex{}

type testError struct {
	retry    bool
	backoff  time.Duration
	reauth   bool
	reupload bool
}

func (t testError) Error() string {
	return fmt.Sprintf("retry %v; backoff %v; reauth %v; reupload %v", t.retry, t.backoff, t.reauth, t.reupload)
}

type errCont struct {
	errMap map[string]map[int]error
	opMap  map[string]int
}

func (e *errCont) getError(name string) error {
	if e.errMap == nil {
		return nil
	}
	if e.opMap == nil {
		e.opMap = make(map[string]int)
	}
	i := e.opMap[name]
	e.opMap[name]++
	return e.errMap[name][i]
}

type testRoot struct {
	errs      *errCont
	auths     int
	bucketMap map[string]map[string]string
}

func (t *testRoot) authorizeAccount(context.Context, string, string, ...ClientOption) error {
	t.auths++
	return nil
}

func (t *testRoot) backoff(err error) time.Duration {
	e, ok := err.(testError)
	if !ok {
		return 0
	}
	return e.backoff
}

func (t *testRoot) reauth(err error) bool {
	e, ok := err.(testError)
	if !ok {
		return false
	}
	return e.reauth
}

func (t *testRoot) reupload(err error) bool {
	e, ok := err.(testError)
	if !ok {
		return false
	}
	return e.reupload
}

func (t *testRoot) transient(err error) bool {
	e, ok := err.(testError)
	if !ok {
		return false
	}
	return e.retry || e.reupload || e.backoff > 0
}

func (t *testRoot) createBucket(_ context.Context, name, _ string, _ map[string]string, _ []LifecycleRule) (b2BucketInterface, error) {
	if err := t.errs.getError("createBucket"); err != nil {
		return nil, err
	}
	if _, ok := t.bucketMap[name]; ok {
		return nil, fmt.Errorf("%s: bucket exists", name)
	}
	m := make(map[string]string)
	t.bucketMap[name] = m
	return &testBucket{
		n:     name,
		errs:  t.errs,
		files: m,
	}, nil
}

func (t *testRoot) listBuckets(context.Context) ([]b2BucketInterface, error) {
	var b []b2BucketInterface
	for k, v := range t.bucketMap {
		b = append(b, &testBucket{
			n:     k,
			errs:  t.errs,
			files: v,
		})
	}
	return b, nil
}

type testBucket struct {
	n     string
	errs  *errCont
	files map[string]string
}

func (t *testBucket) name() string                                     { return t.n }
func (t *testBucket) btype() string                                    { return "allPrivate" }
func (t *testBucket) attrs() *BucketAttrs                              { return nil }
func (t *testBucket) deleteBucket(context.Context) error               { return nil }
func (t *testBucket) updateBucket(context.Context, *BucketAttrs) error { return nil }

func (t *testBucket) getUploadURL(context.Context) (b2URLInterface, error) {
	if err := t.errs.getError("getUploadURL"); err != nil {
		return nil, err
	}
	return &testURL{
		files: t.files,
	}, nil
}

func (t *testBucket) startLargeFile(_ context.Context, name, _ string, _ map[string]string) (b2LargeFileInterface, error) {
	return &testLargeFile{
		name:  name,
		parts: make(map[int][]byte),
		files: t.files,
		errs:  t.errs,
	}, nil
}

func (t *testBucket) listFileNames(ctx context.Context, count int, cont, pfx, del string) ([]b2FileInterface, string, error) {
	var f []string
	gmux.Lock()
	defer gmux.Unlock()
	for name := range t.files {
		f = append(f, name)
	}
	sort.Strings(f)
	idx := sort.SearchStrings(f, cont)
	var b []b2FileInterface
	var next string
	for i := idx; i < len(f) && i-idx < count; i++ {
		b = append(b, &testFile{
			n:     f[i],
			s:     int64(len(t.files[f[i]])),
			files: t.files,
		})
		if i+1 < len(f) {
			next = f[i+1]
		}
		if i+1 == len(f) {
			next = ""
		}
	}
	return b, next, nil
}

func (t *testBucket) listFileVersions(ctx context.Context, count int, a, b, c, d string) ([]b2FileInterface, string, string, error) {
	x, y, z := t.listFileNames(ctx, count, a, c, d)
	return x, y, "", z
}

func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64) (b2FileReaderInterface, error) {
	gmux.Lock()
	defer gmux.Unlock()
	f := t.files[name]
	end := int(offset + size)
	if end >= len(f) {
		end = len(f)
	}
	if int(offset) >= len(f) {
		return nil, errNoMoreContent
	}
	return &testFileReader{
		b: ioutil.NopCloser(bytes.NewBufferString(f[offset:end])),
		s: end - int(offset),
		n: name,
	}, nil
}

func (t *testBucket) hideFile(context.Context, string) (b2FileInterface, error) { return nil, nil }
func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) {
	return "", nil
}
func (t *testBucket) baseURL() string                      { return "" }
func (t *testBucket) file(id, name string) b2FileInterface { return nil }

type testURL struct {
	files map[string]string
}

func (t *testURL) reload(context.Context) error { return nil }

func (t *testURL) uploadFile(_ context.Context, r io.Reader, _ int, name, _, _ string, _ map[string]string) (b2FileInterface, error) {
	buf := &bytes.Buffer{}
	if _, err := io.Copy(buf, r); err != nil {
		return nil, err
	}
	gmux.Lock()
	defer gmux.Unlock()
	t.files[name] = buf.String()
	return &testFile{
		n:     name,
		s:     int64(len(t.files[name])),
		files: t.files,
	}, nil
}

type testLargeFile struct {
	name  string
	parts map[int][]byte
	files map[string]string
	errs  *errCont
}

func (t *testLargeFile) finishLargeFile(context.Context) (b2FileInterface, error) {
	var total []byte
	gmux.Lock()
	defer gmux.Unlock()
	for i := 1; i <= len(t.parts); i++ {
		total = append(total, t.parts[i]...)
	}
	t.files[t.name] = string(total)
	return &testFile{
		n:     t.name,
		s:     int64(len(total)),
		files: t.files,
	}, nil
}

func (t *testLargeFile) getUploadPartURL(context.Context) (b2FileChunkInterface, error) {
	gmux.Lock()
	defer gmux.Unlock()
	return &testFileChunk{
		parts: t.parts,
		errs:  t.errs,
	}, nil
}

type testFileChunk struct {
	parts map[int][]byte
	errs  *errCont
}

func (t *testFileChunk) reload(context.Context) error { return nil }

func (t *testFileChunk) uploadPart(_ context.Context, r io.Reader, _ string, _, index int) (int, error) {
	if err := t.errs.getError("uploadPart"); err != nil {
		return 0, err
	}
	buf := &bytes.Buffer{}
	i, err := io.Copy(buf, r)
	if err != nil {
		return int(i), err
	}
	gmux.Lock()
	defer gmux.Unlock()
	t.parts[index] = buf.Bytes()
	return int(i), nil
}

type testFile struct {
	n     string
	s     int64
	t     time.Time
	a     string
	files map[string]string
}

func (t *testFile) name() string         { return t.n }
func (t *testFile) size() int64          { return t.s }
func (t *testFile) timestamp() time.Time { return t.t }
func (t *testFile) status() string       { return t.a }

func (t *testFile) compileParts(int64, map[int]string) b2LargeFileInterface {
	panic("not implemented")
}

func (t *testFile) getFileInfo(context.Context) (b2FileInfoInterface, error) {
	return nil, nil
}

func (t *testFile) listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) {
	return nil, 0, nil
}

func (t *testFile) deleteFileVersion(context.Context) error {
	gmux.Lock()
	defer gmux.Unlock()
	delete(t.files, t.n)
	return nil
}

type testFileReader struct {
	b io.ReadCloser
	s int
	n string
}

func (t *testFileReader) Read(p []byte) (int, error)                      { return t.b.Read(p) }
func (t *testFileReader) Close() error                                    { return nil }
func (t *testFileReader) stats() (int, string, string, map[string]string) { return t.s, "", "", nil }
func (t *testFileReader) id() string                                      { return t.n }

type zReader struct{}

var pattern = []byte{0x02, 0x80, 0xff, 0x1a, 0xcc, 0x63, 0x22}

// Read fills p with repeating copies of pattern, giving an endless stream of
// deterministic, non-zero test data; it never returns an error.
func (zReader) Read(p []byte) (int, error) {
	for i := 0; i+len(pattern) < len(p); i += len(pattern) {
		copy(p[i:], pattern)
	}
	return len(p), nil
}

func TestReauth(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	root := &testRoot{
		bucketMap: make(map[string]map[string]string),
		errs: &errCont{
			errMap: map[string]map[int]error{
				"createBucket": {0: testError{reauth: true}},
			},
		},
	}
	client := &Client{
		backend: &beRoot{
			b2i: root,
		},
	}
	auths := root.auths
	if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil {
		t.Errorf("bucket should not err, got %v", err)
	}
	if root.auths != auths+1 {
		t.Errorf("client should have re-authenticated; did not")
	}
}

func TestBackoff(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	var calls []time.Duration
	ch := make(chan time.Time)
	close(ch)
	after = func(d time.Duration) <-chan time.Time {
		calls = append(calls, d)
		return ch
	}

	table := []struct {
		root *testRoot
		want int
	}{
		{
			root: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs: &errCont{
					errMap: map[string]map[int]error{
						"createBucket": {
							0: testError{backoff: time.Second},
							1: testError{backoff: 2 * time.Second},
						},
					},
				},
			},
			want: 2,
		},
		{
			root: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs: &errCont{
					errMap: map[string]map[int]error{
						"getUploadURL": {
							0: testError{retry: true},
						},
					},
				},
			},
			want: 1,
		},
	}

	var total int
	for _, ent := range table {
		client := &Client{
			backend: &beRoot{
				b2i: ent.root,
			},
		}
		b, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private})
		if err != nil {
			t.Fatal(err)
		}
		o := b.Object("foo")
		w := o.NewWriter(ctx)
		if _, err := io.Copy(w, bytes.NewBufferString("foo")); err != nil {
			t.Fatal(err)
		}
		if err := w.Close(); err != nil {
			t.Fatal(err)
		}
		total += ent.want
	}
	if len(calls) != total {
		t.Errorf("got %d calls, wanted %d", len(calls), total)
	}
}

func TestBackoffWithoutRetryAfter(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	var calls []time.Duration
	ch := make(chan time.Time)
	close(ch)
	after = func(d time.Duration) <-chan time.Time {
		calls = append(calls, d)
		return ch
	}

	root := &testRoot{
		bucketMap: make(map[string]map[string]string),
		errs: &errCont{
			errMap: map[string]map[int]error{
				"createBucket": {
					0: testError{retry: true},
					1: testError{retry: true},
				},
			},
		},
	}
	client := &Client{
		backend: &beRoot{
			b2i: root,
		},
	}
	if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil {
		t.Errorf("bucket should not err, got %v", err)
	}
	if len(calls) != 2 {
		t.Errorf("wrong number of backoff calls; got %d, want 2", len(calls))
	}
}

type badTransport struct{}

func (badTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	return &http.Response{
		Status:     "700 What",
		StatusCode: 700,
		Body:       ioutil.NopCloser(bytes.NewBufferString("{}")),
		Request:    r,
	}, nil
}

func TestCustomTransport(t *testing.T) {
	ctx := context.Background()
	// Sorta fragile but...
	_, err := NewClient(ctx, "abcd", "efgh", Transport(badTransport{}))
	if err == nil {
		t.Error("NewClient returned successfully, expected an error")
	}
	if !strings.Contains(err.Error(), "700") {
		t.Errorf("Expected nonsense error code 700, got %v", err)
	}
}

func TestReaderDoubleClose(t *testing.T) {
	ctx := context.Background()

	client := &Client{
		backend: &beRoot{
			b2i: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs:      &errCont{},
			},
		},
	}
	bucket, err := client.NewBucket(ctx, "bucket", &BucketAttrs{Type: Private})
	if err != nil {
		t.Fatal(err)
	}
	o, _, err := writeFile(ctx, bucket, "file", 10, 10)
	if err != nil {
		t.Fatal(err)
	}
	r := o.NewReader(ctx)
	// Read to EOF, and then read some more.
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		t.Fatal(err)
	}
}

func TestReadWrite(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	client := &Client{
		backend: &beRoot{
			b2i: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs:      &errCont{},
			},
		},
	}

	bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := bucket.Delete(ctx); err != nil {
			t.Error(err)
		}
	}()

	sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := sobj.Delete(ctx); err != nil {
			t.Error(err)
		}
	}()

	if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil {
		t.Error(err)
	}

	lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 1e6-1e5, 1e4)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := lobj.Delete(ctx); err != nil {
			t.Error(err)
		}
	}()

	if err := readFile(ctx, lobj, wshaL, 1e7, 10); err != nil {
		t.Error(err)
	}
}

func TestReadRangeReturnsRight(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	client := &Client{
		backend: &beRoot{
			b2i: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs:      &errCont{},
			},
		},
	}

	bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := bucket.Delete(ctx); err != nil {
			t.Error(err)
		}
	}()

	obj, _, err := writeFile(ctx, bucket, "file", 1e6+42, 1e8)
	if err != nil {
		t.Fatal(err)
	}
	r := obj.NewRangeReader(ctx, 200, 1400)
	r.ChunkSize = 1000

	i, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		t.Error(err)
	}
	if i != 1400 {
		t.Errorf("NewRangeReader(_, 200, 1400): want 1400, got %d", i)
	}
}

func TestWriterReturnsError(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	client := &Client{
		backend: &beRoot{
			b2i: &testRoot{
				bucketMap: make(map[string]map[string]string),
				errs: &errCont{
					errMap: map[string]map[int]error{
						"uploadPart": {
							0: testError{},
							1: testError{},
							2: testError{},
							3: testError{},
							4: testError{},
							5: testError{},
							6: testError{},
						},
					},
				},
			},
		},
	}

	bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private})
	if err != nil {
		t.Fatal(err)
	}
	w := bucket.Object("test").NewWriter(ctx)
	r := io.LimitReader(zReader{}, 1e7)
	w.ChunkSize = 1e4
	w.ConcurrentUploads = 4
	if _, err := io.Copy(w, r); err == nil {
		t.Fatalf("io.Copy: should have returned an error")
	}
}

func TestFileBuffer(t *testing.T) {
	r := io.LimitReader(zReader{}, 1e8)
	w, err := newFileBuffer("")
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()
	if _, err := io.Copy(w, r); err != nil {
		t.Fatal(err)
	}
	bReader, err := w.Reader()
	if err != nil {
		t.Fatal(err)
	}
	hsh := sha1.New()
	if _, err := io.Copy(hsh, bReader); err != nil {
		t.Fatal(err)
	}
	hshText := fmt.Sprintf("%x", hsh.Sum(nil))
	if hshText != w.Hash() {
		t.Errorf("hashes are not equal: bufferWriter is %q, read buffer is %q", w.Hash(), hshText)
	}
}

func writeFile(ctx context.Context, bucket *Bucket, name string, size int64, csize int) (*Object, string, error) {
	r := io.LimitReader(zReader{}, size)
	o := bucket.Object(name)
	f := o.NewWriter(ctx)
	h := sha1.New()
	w := io.MultiWriter(f, h)
	f.ConcurrentUploads = 5
	f.ChunkSize = csize
	if _, err := io.Copy(w, r); err != nil {
		return nil, "", err
	}
	if err := f.Close(); err != nil {
		return nil, "", err
	}
	return o, fmt.Sprintf("%x", h.Sum(nil)), nil
}

func readFile(ctx context.Context, obj *Object, sha string, chunk, concur int) error {
	r := obj.NewReader(ctx)
	r.ChunkSize = chunk
	r.ConcurrentDownloads = concur
	h := sha1.New()
	if _, err := io.Copy(h, r); err != nil {
		return err
	}
	if err := r.Close(); err != nil {
		return err
	}
	rsha := fmt.Sprintf("%x", h.Sum(nil))
	if sha != rsha {
		return fmt.Errorf("bad hash: got %s, want %s", rsha, sha)
	}
	return nil
}
670
vendor/github.com/kurin/blazer/b2/backend.go
generated
vendored
Normal file

@ -0,0 +1,670 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"io"
	"math/rand"
	"time"

	"golang.org/x/net/context"
)

// This file wraps the baseline interfaces with backoff and retry semantics.
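
// The wrapping pattern used throughout this file: each method builds an
// innermost closure g that performs the raw call against the underlying b2
// interface, wraps it in withReauth, and wraps that in withBackoff (both
// defined elsewhere in this package), so every operation picks up
// re-authentication and retry-with-backoff behavior. Schematically, as in
// createBucket below:
//
//	f := func() error {
//		g := func() error {
//			// the raw b2i call goes here
//			return nil
//		}
//		return withReauth(ctx, r, g)
//	}
//	return withBackoff(ctx, r, f)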
|
||||
type beRootInterface interface {
|
||||
backoff(error) time.Duration
|
||||
reauth(error) bool
|
||||
transient(error) bool
|
||||
reupload(error) bool
|
||||
authorizeAccount(context.Context, string, string, ...ClientOption) error
|
||||
reauthorizeAccount(context.Context) error
|
||||
createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error)
|
||||
listBuckets(context.Context) ([]beBucketInterface, error)
|
||||
}
|
||||
|
||||
type beRoot struct {
|
||||
account, key string
|
||||
b2i b2RootInterface
|
||||
}
|
||||
|
||||
type beBucketInterface interface {
|
||||
name() string
|
||||
btype() BucketType
|
||||
attrs() *BucketAttrs
|
||||
updateBucket(context.Context, *BucketAttrs) error
|
||||
deleteBucket(context.Context) error
|
||||
getUploadURL(context.Context) (beURLInterface, error)
|
||||
	startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error)
	listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error)
	listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error)
	downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error)
	hideFile(context.Context, string) (beFileInterface, error)
	getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
	baseURL() string
	file(string, string) beFileInterface
}

type beBucket struct {
	b2bucket b2BucketInterface
	ri       beRootInterface
}

type beURLInterface interface {
	uploadFile(context.Context, io.ReadSeeker, int, string, string, string, map[string]string) (beFileInterface, error)
}

type beURL struct {
	b2url b2URLInterface
	ri    beRootInterface
}

type beFileInterface interface {
	name() string
	size() int64
	timestamp() time.Time
	status() string
	deleteFileVersion(context.Context) error
	getFileInfo(context.Context) (beFileInfoInterface, error)
	listParts(context.Context, int, int) ([]beFilePartInterface, int, error)
	compileParts(int64, map[int]string) beLargeFileInterface
}

type beFile struct {
	b2file b2FileInterface
	url    beURLInterface
	ri     beRootInterface
}

type beLargeFileInterface interface {
	finishLargeFile(context.Context) (beFileInterface, error)
	getUploadPartURL(context.Context) (beFileChunkInterface, error)
}

type beLargeFile struct {
	b2largeFile b2LargeFileInterface
	ri          beRootInterface
}

type beFileChunkInterface interface {
	reload(context.Context) error
	uploadPart(context.Context, io.ReadSeeker, string, int, int) (int, error)
}

type beFileChunk struct {
	b2fileChunk b2FileChunkInterface
	ri          beRootInterface
}

type beFileReaderInterface interface {
	io.ReadCloser
	stats() (int, string, string, map[string]string)
	id() string
}

type beFileReader struct {
	b2fileReader b2FileReaderInterface
	ri           beRootInterface
}

type beFileInfoInterface interface {
	stats() (string, string, int64, string, map[string]string, string, time.Time)
}

type beFilePartInterface interface {
	number() int
	sha1() string
	size() int64
}

type beFilePart struct {
	b2filePart b2FilePartInterface
	ri         beRootInterface
}

type beFileInfo struct {
	name   string
	sha    string
	size   int64
	ct     string
	info   map[string]string
	status string
	stamp  time.Time
}

func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) }
func (r *beRoot) reauth(err error) bool           { return r.b2i.reauth(err) }
func (r *beRoot) reupload(err error) bool         { return r.b2i.reupload(err) }
func (r *beRoot) transient(err error) bool        { return r.b2i.transient(err) }

func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error {
	f := func() error {
		if err := r.b2i.authorizeAccount(ctx, account, key, opts...); err != nil {
			return err
		}
		r.account = account
		r.key = key
		return nil
	}
	return withBackoff(ctx, r, f)
}

func (r *beRoot) reauthorizeAccount(ctx context.Context) error {
	return r.authorizeAccount(ctx, r.account, r.key)
}
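// Note (added commentary, not part of the upstream file): nearly every
// method below follows the same two-closure shape. The inner closure g
// makes a single API call and is run through withReauth, which retries
// once after refreshing the account token; the outer closure f is run
// through withBackoff, which retries transient errors with exponential
// backoff. Schematically (oneAPICall is a stand-in name):
//
//	f := func() error {
//		g := func() error { return oneAPICall(ctx) }
//		return withReauth(ctx, ri, g)
//	}
//	return withBackoff(ctx, ri, f)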
func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) {
	var bi beBucketInterface
	f := func() error {
		g := func() error {
			bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules)
			if err != nil {
				return err
			}
			bi = &beBucket{
				b2bucket: bucket,
				ri:       r,
			}
			return nil
		}
		return withReauth(ctx, r, g)
	}
	if err := withBackoff(ctx, r, f); err != nil {
		return nil, err
	}
	return bi, nil
}

func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) {
	var buckets []beBucketInterface
	f := func() error {
		g := func() error {
			bs, err := r.b2i.listBuckets(ctx)
			if err != nil {
				return err
			}
			for _, b := range bs {
				buckets = append(buckets, &beBucket{
					b2bucket: b,
					ri:       r,
				})
			}
			return nil
		}
		return withReauth(ctx, r, g)
	}
	if err := withBackoff(ctx, r, f); err != nil {
		return nil, err
	}
	return buckets, nil
}

func (b *beBucket) name() string {
	return b.b2bucket.name()
}

func (b *beBucket) btype() BucketType {
	return BucketType(b.b2bucket.btype())
}

func (b *beBucket) attrs() *BucketAttrs {
	return b.b2bucket.attrs()
}

func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
	f := func() error {
		g := func() error {
			return b.b2bucket.updateBucket(ctx, attrs)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beBucket) deleteBucket(ctx context.Context) error {
	f := func() error {
		g := func() error {
			return b.b2bucket.deleteBucket(ctx)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beBucket) getUploadURL(ctx context.Context) (beURLInterface, error) {
	var url beURLInterface
	f := func() error {
		g := func() error {
			u, err := b.b2bucket.getUploadURL(ctx)
			if err != nil {
				return err
			}
			url = &beURL{
				b2url: u,
				ri:    b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return url, nil
}

func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) {
	var file beLargeFileInterface
	f := func() error {
		g := func() error {
			f, err := b.b2bucket.startLargeFile(ctx, name, ct, info)
			if err != nil {
				return err
			}
			file = &beLargeFile{
				b2largeFile: f,
				ri:          b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return file, nil
}

func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) {
	var cont string
	var files []beFileInterface
	f := func() error {
		g := func() error {
			fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter)
			if err != nil {
				return err
			}
			cont = c
			for _, f := range fs {
				files = append(files, &beFile{
					b2file: f,
					ri:     b.ri,
				})
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, "", err
	}
	return files, cont, nil
}

func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) {
	var name, id string
	var files []beFileInterface
	f := func() error {
		g := func() error {
			fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
			if err != nil {
				return err
			}
			name = n
			id = d
			for _, f := range fs {
				files = append(files, &beFile{
					b2file: f,
					ri:     b.ri,
				})
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, "", "", err
	}
	return files, name, id, nil
}

func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (beFileReaderInterface, error) {
	var reader beFileReaderInterface
	f := func() error {
		g := func() error {
			fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size)
			if err != nil {
				return err
			}
			reader = &beFileReader{
				b2fileReader: fr,
				ri:           b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return reader, nil
}

func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) {
	var file beFileInterface
	f := func() error {
		g := func() error {
			f, err := b.b2bucket.hideFile(ctx, name)
			if err != nil {
				return err
			}
			file = &beFile{
				b2file: f,
				ri:     b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return file, nil
}

func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) {
	var tok string
	f := func() error {
		g := func() error {
			t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v)
			if err != nil {
				return err
			}
			tok = t
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return "", err
	}
	return tok, nil
}

func (b *beBucket) baseURL() string {
	return b.b2bucket.baseURL()
}

func (b *beBucket) file(id, name string) beFileInterface {
	return &beFile{
		b2file: b.b2bucket.file(id, name),
		ri:     b.ri,
	}
}
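// Note (added commentary, not part of the upstream file): uploadFile below
// (like uploadPart further down) is deliberately wrapped only in
// withBackoff, not withReauth: upload URLs carry their own auth token, so
// on an auth failure the caller is expected to fetch a fresh URL rather
// than re-authorize the account. The io.ReadSeeker is rewound before every
// attempt so a retry resends the body from the start.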
func (b *beURL) uploadFile(ctx context.Context, r io.ReadSeeker, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) {
	var file beFileInterface
	f := func() error {
		if _, err := r.Seek(0, 0); err != nil {
			return err
		}
		f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info)
		if err != nil {
			return err
		}
		file = &beFile{
			b2file: f,
			url:    b,
			ri:     b.ri,
		}
		return nil
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return file, nil
}

func (b *beFile) deleteFileVersion(ctx context.Context) error {
	f := func() error {
		g := func() error {
			return b.b2file.deleteFileVersion(ctx)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beFile) size() int64 {
	return b.b2file.size()
}

func (b *beFile) name() string {
	return b.b2file.name()
}

func (b *beFile) timestamp() time.Time {
	return b.b2file.timestamp()
}

func (b *beFile) status() string {
	return b.b2file.status()
}

func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) {
	var fileInfo beFileInfoInterface
	f := func() error {
		g := func() error {
			fi, err := b.b2file.getFileInfo(ctx)
			if err != nil {
				return err
			}
			name, sha, size, ct, info, status, stamp := fi.stats()
			fileInfo = &beFileInfo{
				name:   name,
				sha:    sha,
				size:   size,
				ct:     ct,
				info:   info,
				status: status,
				stamp:  stamp,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return fileInfo, nil
}

func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) {
	var fpi []beFilePartInterface
	var rnxt int
	f := func() error {
		g := func() error {
			ps, n, err := b.b2file.listParts(ctx, next, count)
			if err != nil {
				return err
			}
			rnxt = n
			for _, p := range ps {
				fpi = append(fpi, &beFilePart{
					b2filePart: p,
					ri:         b.ri,
				})
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, 0, err
	}
	return fpi, rnxt, nil
}

func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface {
	return &beLargeFile{
		b2largeFile: b.b2file.compileParts(size, seen),
		ri:          b.ri,
	}
}

func (b *beLargeFile) getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) {
	var chunk beFileChunkInterface
	f := func() error {
		g := func() error {
			fc, err := b.b2largeFile.getUploadPartURL(ctx)
			if err != nil {
				return err
			}
			chunk = &beFileChunk{
				b2fileChunk: fc,
				ri:          b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return chunk, nil
}

func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) {
	var file beFileInterface
	f := func() error {
		g := func() error {
			f, err := b.b2largeFile.finishLargeFile(ctx)
			if err != nil {
				return err
			}
			file = &beFile{
				b2file: f,
				ri:     b.ri,
			}
			return nil
		}
		return withReauth(ctx, b.ri, g)
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return nil, err
	}
	return file, nil
}

func (b *beFileChunk) reload(ctx context.Context) error {
	f := func() error {
		g := func() error {
			return b.b2fileChunk.reload(ctx)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beFileChunk) uploadPart(ctx context.Context, r io.ReadSeeker, sha1 string, size, index int) (int, error) {
	// No re-auth here; the error is passed back up to the caller so they can
	// get a new upload URI and token.
	// TODO: we should probably handle that here.
	var i int
	f := func() error {
		if _, err := r.Seek(0, 0); err != nil {
			return err
		}
		j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index)
		if err != nil {
			return err
		}
		i = j
		return nil
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return 0, err
	}
	return i, nil
}

func (b *beFileReader) Read(p []byte) (int, error) {
	return b.b2fileReader.Read(p)
}

func (b *beFileReader) Close() error {
	return b.b2fileReader.Close()
}

func (b *beFileReader) stats() (int, string, string, map[string]string) {
	return b.b2fileReader.stats()
}

func (b *beFileReader) id() string { return b.b2fileReader.id() }

func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
	return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp
}

func (b *beFilePart) number() int  { return b.b2filePart.number() }
func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
func (b *beFilePart) size() int64  { return b.b2filePart.size() }
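// Note (added commentary, not part of the upstream file): jitter(d)
// returns a random duration between roughly 1% and 3% of d (d/50 scaled by
// a factor in [0.5, 1.5)), which keeps many concurrent callers from
// retrying in lockstep. getBackoff doubles the delay until it passes 15
// seconds, after which it grows only by the small jitter term.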
func jitter(d time.Duration) time.Duration {
	f := float64(d)
	f /= 50
	f += f * (rand.Float64() - 0.5)
	return time.Duration(f)
}

func getBackoff(d time.Duration) time.Duration {
	if d > 15*time.Second {
		return d + jitter(d)
	}
	return d*2 + jitter(d*2)
}

var after = time.After
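// Note (added commentary, not part of the upstream file): withBackoff
// loops until f succeeds, fails with a non-transient error, or the context
// is canceled. The delay starts at 500ms and is advanced by getBackoff
// unless the root supplies an explicit backoff for the error (ri.backoff),
// in which case that value wins. withReauth runs f once and, on an auth
// error, re-authorizes the account and retries exactly once.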
func withBackoff(ctx context.Context, ri beRootInterface, f func() error) error {
	backoff := 500 * time.Millisecond
	for {
		err := f()
		if !ri.transient(err) {
			return err
		}
		bo := ri.backoff(err)
		if bo > 0 {
			backoff = bo
		} else {
			backoff = getBackoff(backoff)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-after(backoff):
		}
	}
}

func withReauth(ctx context.Context, ri beRootInterface, f func() error) error {
	err := f()
	if ri.reauth(err) {
		if err := ri.reauthorizeAccount(ctx); err != nil {
			return err
		}
		err = f()
	}
	return err
}

451 vendor/github.com/kurin/blazer/b2/baseline.go generated vendored Normal file
@@ -0,0 +1,451 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"io"
	"net/http"
	"time"

	"github.com/kurin/blazer/base"

	"golang.org/x/net/context"
)

// This file wraps the base package in a thin layer, for testing. It should be
// the only file in b2 that imports base.

type b2RootInterface interface {
	authorizeAccount(context.Context, string, string, ...ClientOption) error
	transient(error) bool
	backoff(error) time.Duration
	reauth(error) bool
	reupload(error) bool
	createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error)
	listBuckets(context.Context) ([]b2BucketInterface, error)
}

type b2BucketInterface interface {
	name() string
	btype() string
	attrs() *BucketAttrs
	updateBucket(context.Context, *BucketAttrs) error
	deleteBucket(context.Context) error
	getUploadURL(context.Context) (b2URLInterface, error)
	startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error)
	listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error)
	listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error)
	downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error)
	hideFile(context.Context, string) (b2FileInterface, error)
	getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
	baseURL() string
	file(string, string) b2FileInterface
}

type b2URLInterface interface {
	reload(context.Context) error
	uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error)
}

type b2FileInterface interface {
	name() string
	size() int64
	timestamp() time.Time
	status() string
	deleteFileVersion(context.Context) error
	getFileInfo(context.Context) (b2FileInfoInterface, error)
	listParts(context.Context, int, int) ([]b2FilePartInterface, int, error)
	compileParts(int64, map[int]string) b2LargeFileInterface
}

type b2LargeFileInterface interface {
	finishLargeFile(context.Context) (b2FileInterface, error)
	getUploadPartURL(context.Context) (b2FileChunkInterface, error)
}

type b2FileChunkInterface interface {
	reload(context.Context) error
	uploadPart(context.Context, io.Reader, string, int, int) (int, error)
}

type b2FileReaderInterface interface {
	io.ReadCloser
	stats() (int, string, string, map[string]string)
	id() string
}

type b2FileInfoInterface interface {
	stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck
}

type b2FilePartInterface interface {
	number() int
	sha1() string
	size() int64
}

type b2Root struct {
	b *base.B2
}

type b2Bucket struct {
	b *base.Bucket
}

type b2URL struct {
	b *base.URL
}

type b2File struct {
	b *base.File
}

type b2LargeFile struct {
	b *base.LargeFile
}

type b2FileChunk struct {
	b *base.FileChunk
}

type b2FileReader struct {
	b *base.FileReader
}

type b2FileInfo struct {
	b *base.FileInfo
}

type b2FilePart struct {
	b *base.FilePart
}
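// Note (added commentary, not part of the upstream file): authorizeAccount
// translates the package's ClientOptions into base.AuthOptions (a transport
// override plus the fault-injection options used by the integration tests)
// and, on re-authorization, updates the existing *base.B2 in place so that
// handles already held by callers keep working.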
func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error {
	c := &clientOptions{}
	for _, f := range opts {
		f(c)
	}
	var aopts []base.AuthOption
	if c.transport != nil {
		aopts = append(aopts, base.Transport(c.transport))
	}
	if c.failSomeUploads {
		aopts = append(aopts, base.FailSomeUploads())
	}
	if c.expireTokens {
		aopts = append(aopts, base.ExpireSomeAuthTokens())
	}
	if c.capExceeded {
		aopts = append(aopts, base.ForceCapExceeded())
	}
	nb, err := base.AuthorizeAccount(ctx, account, key, aopts...)
	if err != nil {
		return err
	}
	if b.b == nil {
		b.b = nb
		return nil
	}
	b.b.Update(nb)
	return nil
}

func (*b2Root) backoff(err error) time.Duration {
	if base.Action(err) != base.Retry {
		return 0
	}
	return base.Backoff(err)
}

func (*b2Root) reauth(err error) bool {
	return base.Action(err) == base.ReAuthenticate
}

func (*b2Root) reupload(err error) bool {
	return base.Action(err) == base.AttemptNewUpload
}

func (*b2Root) transient(err error) bool {
	return base.Action(err) == base.Retry
}

func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) {
	var baseRules []base.LifecycleRule
	for _, rule := range rules {
		baseRules = append(baseRules, base.LifecycleRule{
			DaysNewUntilHidden:     rule.DaysNewUntilHidden,
			DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
			Prefix:                 rule.Prefix,
		})
	}
	bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules)
	if err != nil {
		return nil, err
	}
	return &b2Bucket{bucket}, nil
}

func (b *b2Root) listBuckets(ctx context.Context) ([]b2BucketInterface, error) {
	buckets, err := b.b.ListBuckets(ctx)
	if err != nil {
		return nil, err
	}
	var rtn []b2BucketInterface
	for _, bucket := range buckets {
		rtn = append(rtn, &b2Bucket{bucket})
	}
	return rtn, err
}
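// Note (added commentary, not part of the upstream file): updateBucket
// merges only the non-zero attrs into the cached bucket before calling
// Update, and maps an HTTP 409 from the service to a b2err with
// isUpdateConflict set, which is what IsUpdateConflict (exercised by
// TestDuelingBuckets below) reports on.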
func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
	if attrs == nil {
		return nil
	}
	if attrs.Type != UnknownType {
		b.b.Type = string(attrs.Type)
	}
	if attrs.Info != nil {
		b.b.Info = attrs.Info
	}
	if attrs.LifecycleRules != nil {
		rules := []base.LifecycleRule{}
		for _, rule := range attrs.LifecycleRules {
			rules = append(rules, base.LifecycleRule{
				DaysNewUntilHidden:     rule.DaysNewUntilHidden,
				DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
				Prefix:                 rule.Prefix,
			})
		}
		b.b.LifecycleRules = rules
	}
	newBucket, err := b.b.Update(ctx)
	if err == nil {
		b.b = newBucket
	}
	code, _ := base.Code(err)
	if code == 409 {
		return b2err{
			err:              err,
			isUpdateConflict: true,
		}
	}
	return err
}

func (b *b2Bucket) deleteBucket(ctx context.Context) error {
	return b.b.DeleteBucket(ctx)
}

func (b *b2Bucket) name() string {
	return b.b.Name
}

func (b *b2Bucket) btype() string {
	return b.b.Type
}

func (b *b2Bucket) attrs() *BucketAttrs {
	var rules []LifecycleRule
	for _, rule := range b.b.LifecycleRules {
		rules = append(rules, LifecycleRule{
			DaysNewUntilHidden:     rule.DaysNewUntilHidden,
			DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
			Prefix:                 rule.Prefix,
		})
	}
	return &BucketAttrs{
		LifecycleRules: rules,
		Info:           b.b.Info,
		Type:           BucketType(b.b.Type),
	}
}

func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) {
	url, err := b.b.GetUploadURL(ctx)
	if err != nil {
		return nil, err
	}
	return &b2URL{url}, nil
}

func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) {
	lf, err := b.b.StartLargeFile(ctx, name, ct, info)
	if err != nil {
		return nil, err
	}
	return &b2LargeFile{lf}, nil
}

func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) {
	fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter)
	if err != nil {
		return nil, "", err
	}
	var files []b2FileInterface
	for _, f := range fs {
		files = append(files, &b2File{f})
	}
	return files, c, nil
}

func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) {
	fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
	if err != nil {
		return nil, "", "", err
	}
	var files []b2FileInterface
	for _, f := range fs {
		files = append(files, &b2File{f})
	}
	return files, name, id, nil
}
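// Note (added commentary, not part of the upstream file):
// downloadFileByName turns two specific HTTP statuses into sentinel errors
// the rest of the package understands: 416 (range not satisfiable) becomes
// errNoMoreContent, signaling that a ranged read ran past the end of the
// file, and 404 becomes a b2err that IsNotExist recognizes.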
func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) {
	fr, err := b.b.DownloadFileByName(ctx, name, offset, size)
	if err != nil {
		code, _ := base.Code(err)
		switch code {
		case http.StatusRequestedRangeNotSatisfiable:
			return nil, errNoMoreContent
		case http.StatusNotFound:
			return nil, b2err{err: err, notFoundErr: true}
		}
		return nil, err
	}
	return &b2FileReader{fr}, nil
}

func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) {
	f, err := b.b.HideFile(ctx, name)
	if err != nil {
		return nil, err
	}
	return &b2File{f}, nil
}

func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) {
	return b.b.GetDownloadAuthorization(ctx, p, v)
}

func (b *b2Bucket) baseURL() string {
	return b.b.BaseURL()
}

func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} }

func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) {
	file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info)
	if err != nil {
		return nil, err
	}
	return &b2File{file}, nil
}

func (b *b2URL) reload(ctx context.Context) error {
	return b.b.Reload(ctx)
}

func (b *b2File) deleteFileVersion(ctx context.Context) error {
	return b.b.DeleteFileVersion(ctx)
}

func (b *b2File) name() string {
	return b.b.Name
}

func (b *b2File) size() int64 {
	return b.b.Size
}

func (b *b2File) timestamp() time.Time {
	return b.b.Timestamp
}

func (b *b2File) status() string {
	return b.b.Status
}

func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) {
	if b.b.Info != nil {
		return &b2FileInfo{b.b.Info}, nil
	}
	fi, err := b.b.GetFileInfo(ctx)
	if err != nil {
		return nil, err
	}
	return &b2FileInfo{fi}, nil
}

func (b *b2File) listParts(ctx context.Context, next, count int) ([]b2FilePartInterface, int, error) {
	parts, n, err := b.b.ListParts(ctx, next, count)
	if err != nil {
		return nil, 0, err
	}
	var rtn []b2FilePartInterface
	for _, part := range parts {
		rtn = append(rtn, &b2FilePart{part})
	}
	return rtn, n, nil
}

func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface {
	return &b2LargeFile{b.b.CompileParts(size, seen)}
}

func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) {
	f, err := b.b.FinishLargeFile(ctx)
	if err != nil {
		return nil, err
	}
	return &b2File{f}, nil
}

func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) {
	c, err := b.b.GetUploadPartURL(ctx)
	if err != nil {
		return nil, err
	}
	return &b2FileChunk{c}, nil
}

func (b *b2FileChunk) reload(ctx context.Context) error {
	return b.b.Reload(ctx)
}

func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) {
	return b.b.UploadPart(ctx, r, sha1, size, index)
}

func (b *b2FileReader) Read(p []byte) (int, error) {
	return b.b.Read(p)
}

func (b *b2FileReader) Close() error {
	return b.b.Close()
}

func (b *b2FileReader) stats() (int, string, string, map[string]string) {
	return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info
}

func (b *b2FileReader) id() string { return b.b.ID }

func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
	return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp
}

func (b *b2FilePart) number() int  { return b.b.Number }
func (b *b2FilePart) sha1() string { return b.b.SHA1 }
func (b *b2FilePart) size() int64  { return b.b.Size }

128 vendor/github.com/kurin/blazer/b2/buffer.go generated vendored Normal file
@@ -0,0 +1,128 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"os"
	"sync"
)
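// Note (added commentary, not part of the upstream file): writeBuffer is
// the staging area for one upload chunk. It accumulates writes, tracks
// their running SHA1 (B2 requires a hash per upload), and can hand back an
// io.ReadSeeker so the chunk can be re-sent on retry. Two implementations
// follow: an in-memory one backed by pooled bytes.Buffers, and a
// temp-file-backed one selected via the writer's UseFileBuffer option.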
type writeBuffer interface {
	io.Writer
	Len() int
	Reader() (io.ReadSeeker, error)
	Hash() string // sha1 or whatever it is
	Close() error
}

type memoryBuffer struct {
	buf *bytes.Buffer
	hsh hash.Hash
	w   io.Writer
	mux sync.Mutex
}
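// Note (added commentary, not part of the upstream file): bufpool recycles
// the underlying bytes.Buffers across uploads; Close truncates the buffer
// and returns it to the pool, so a closed memoryBuffer must not be reused.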
var bufpool *sync.Pool

func init() {
	bufpool = &sync.Pool{}
	bufpool.New = func() interface{} { return &bytes.Buffer{} }
}

func newMemoryBuffer() *memoryBuffer {
	mb := &memoryBuffer{
		hsh: sha1.New(),
	}
	mb.buf = bufpool.Get().(*bytes.Buffer)
	mb.w = io.MultiWriter(mb.hsh, mb.buf)
	return mb
}

type thing struct {
	rs io.ReadSeeker
	t  int
}

func (mb *memoryBuffer) Write(p []byte) (int, error)    { return mb.w.Write(p) }
func (mb *memoryBuffer) Len() int                       { return mb.buf.Len() }
func (mb *memoryBuffer) Reader() (io.ReadSeeker, error) { return bytes.NewReader(mb.buf.Bytes()), nil }
func (mb *memoryBuffer) Hash() string                   { return fmt.Sprintf("%x", mb.hsh.Sum(nil)) }

func (mb *memoryBuffer) Close() error {
	mb.mux.Lock()
	defer mb.mux.Unlock()
	if mb.buf == nil {
		return nil
	}
	mb.buf.Truncate(0)
	bufpool.Put(mb.buf)
	mb.buf = nil
	return nil
}

type fileBuffer struct {
	f   *os.File
	hsh hash.Hash
	w   io.Writer
	s   int
}

func newFileBuffer(loc string) (*fileBuffer, error) {
	f, err := ioutil.TempFile(loc, "blazer")
	if err != nil {
		return nil, err
	}
	fb := &fileBuffer{
		f:   f,
		hsh: sha1.New(),
	}
	fb.w = io.MultiWriter(fb.f, fb.hsh)
	return fb, nil
}

func (fb *fileBuffer) Write(p []byte) (int, error) {
	n, err := fb.w.Write(p)
	fb.s += n
	return n, err
}

func (fb *fileBuffer) Len() int     { return fb.s }
func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) }

func (fb *fileBuffer) Reader() (io.ReadSeeker, error) {
	if _, err := fb.f.Seek(0, 0); err != nil {
		return nil, err
	}
	return &fr{f: fb.f}, nil
}

func (fb *fileBuffer) Close() error {
	fb.f.Close()
	return os.Remove(fb.f.Name())
}

// wraps *os.File so that the http package doesn't see it as an io.Closer
type fr struct {
	f *os.File
}

func (r *fr) Read(p []byte) (int, error)         { return r.f.Read(p) }
func (r *fr) Seek(a int64, b int) (int64, error) { return r.f.Seek(a, b) }

799 vendor/github.com/kurin/blazer/b2/integration_test.go generated vendored Normal file
@@ -0,0 +1,799 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
	"net/http"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/context"
)

const (
	apiID  = "B2_ACCOUNT_ID"
	apiKey = "B2_SECRET_KEY"

	errVar = "B2_TRANSIENT_ERRORS"
)

func TestReadWriteLive(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6-42, 1e8)
	if err != nil {
		t.Fatal(err)
	}

	lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 5e6+5e4, 5e6)
	if err != nil {
		t.Fatal(err)
	}

	if err := readFile(ctx, lobj, wshaL, 1e6, 10); err != nil {
		t.Error(err)
	}

	if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil {
		t.Error(err)
	}

	var cur *Cursor
	for {
		objs, c, err := bucket.ListObjects(ctx, 100, cur)
		if err != nil && err != io.EOF {
			t.Fatal(err)
		}
		for _, o := range objs {
			if err := o.Delete(ctx); err != nil {
				t.Error(err)
			}
		}
		if err == io.EOF {
			break
		}
		cur = c
	}
}

func TestHideShowLive(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	// write a file
	obj, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
	if err != nil {
		t.Fatal(err)
	}

	got, err := countObjects(ctx, bucket.ListCurrentObjects)
	if err != nil {
		t.Error(err)
	}
	if got != 1 {
		t.Fatalf("got %d objects, wanted 1", got)
	}

	// When the hide marker and the object it's hiding were created within the
	// same second, they can be sorted in the wrong order, causing the object to
	// fail to be hidden.
	time.Sleep(1500 * time.Millisecond)

	// hide the file
	if err := obj.Hide(ctx); err != nil {
		t.Fatal(err)
	}

	got, err = countObjects(ctx, bucket.ListCurrentObjects)
	if err != nil {
		t.Error(err)
	}
	if got != 0 {
		t.Fatalf("got %d objects, wanted 0", got)
	}

	// unhide the file
	if err := bucket.Reveal(ctx, smallFileName); err != nil {
		t.Fatal(err)
	}

	// the count should see the object again
	got, err = countObjects(ctx, bucket.ListCurrentObjects)
	if err != nil {
		t.Error(err)
	}
	if got != 1 {
		t.Fatalf("got %d objects, wanted 1", got)
	}
}
type cancelReader struct {
	r    io.Reader
	n, l int
	c    func()
}

func (c *cancelReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += n
	if c.n >= c.l {
		c.c()
	}
	return n, err
}

func TestResumeWriter(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	bucket, _ := startLiveTest(ctx, t)

	w := bucket.Object("foo").NewWriter(ctx)
	w.ChunkSize = 5e6
	r := &cancelReader{
		r: io.LimitReader(zReader{}, 15e6),
		l: 6e6,
		c: cancel,
	}
	if _, err := io.Copy(w, r); err != context.Canceled {
		t.Fatalf("io.Copy: wanted canceled context, got: %v", err)
	}

	ctx2 := context.Background()
	ctx2, cancel2 := context.WithTimeout(ctx2, 10*time.Minute)
	defer cancel2()
	bucket2, done := startLiveTest(ctx2, t)
	defer done()
	w2 := bucket2.Object("foo").NewWriter(ctx2)
	w2.ChunkSize = 5e6
	r2 := io.LimitReader(zReader{}, 15e6)
	h1 := sha1.New()
	tr := io.TeeReader(r2, h1)
	w2.Resume = true
	w2.ConcurrentUploads = 2
	if _, err := io.Copy(w2, tr); err != nil {
		t.Fatal(err)
	}
	if err := w2.Close(); err != nil {
		t.Fatal(err)
	}
	begSHA := fmt.Sprintf("%x", h1.Sum(nil))

	objR := bucket2.Object("foo").NewReader(ctx2)
	objR.ConcurrentDownloads = 3
	h2 := sha1.New()
	if _, err := io.Copy(h2, objR); err != nil {
		t.Fatal(err)
	}
	if err := objR.Close(); err != nil {
		t.Error(err)
	}
	endSHA := fmt.Sprintf("%x", h2.Sum(nil))
	if endSHA != begSHA {
		t.Errorf("got conflicting hashes: got %q, want %q", endSHA, begSHA)
	}
}

func TestAttrs(t *testing.T) {
	// TODO: test is flaky
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	attrlist := []*Attrs{
		&Attrs{
			ContentType: "jpeg/stream",
			Info: map[string]string{
				"one": "a",
				"two": "b",
			},
		},
		&Attrs{
			ContentType:  "application/MAGICFACE",
			LastModified: time.Unix(1464370149, 142000000),
			Info:         map[string]string{}, // can't be nil
		},
		&Attrs{
			ContentType: "arbitrarystring",
			Info: map[string]string{
				"spaces":  "string with spaces",
				"unicode": "日本語",
				"special": "&/!@_.~",
			},
		},
	}

	table := []struct {
		name string
		size int64
	}{
		{
			name: "small",
			size: 1e3,
		},
		{
			name: "large",
			size: 5e6 + 4,
		},
	}

	for _, e := range table {
		for _, attrs := range attrlist {
			o := bucket.Object(e.name)
			w := o.NewWriter(ctx).WithAttrs(attrs)
			if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil {
				t.Error(err)
				continue
			}
			if err := w.Close(); err != nil {
				t.Error(err)
				continue
			}
			gotAttrs, err := bucket.Object(e.name).Attrs(ctx)
			if err != nil {
				t.Error(err)
				continue
			}
			if gotAttrs.ContentType != attrs.ContentType {
				t.Errorf("bad content-type for %s: got %q, want %q", e.name, gotAttrs.ContentType, attrs.ContentType)
			}
			if !reflect.DeepEqual(gotAttrs.Info, attrs.Info) {
				t.Errorf("bad info for %s: got %#v, want %#v", e.name, gotAttrs.Info, attrs.Info)
			}
			if !gotAttrs.LastModified.Equal(attrs.LastModified) {
				t.Errorf("bad lastmodified time for %s: got %v, want %v", e.name, gotAttrs.LastModified, attrs.LastModified)
			}
			if err := o.Delete(ctx); err != nil {
				t.Errorf("Object(%q).Delete: %v", e.name, err)
			}
		}
	}
}

func TestFileBufferLive(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	r := io.LimitReader(zReader{}, 1e6)
	w := bucket.Object("small").NewWriter(ctx)

	w.UseFileBuffer = true

	w.Write(nil)
	wb, ok := w.w.(*fileBuffer)
	if !ok {
		t.Fatalf("writer isn't using file buffer: %T", w.w)
	}
	smallTmpName := wb.f.Name()

	if _, err := io.Copy(w, r); err != nil {
		t.Errorf("creating small file: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Errorf("w.Close(): %v", err)
	}

	if _, err := os.Stat(smallTmpName); !os.IsNotExist(err) {
		t.Errorf("tmp file exists (%s) or other error: %v", smallTmpName, err)
	}
}

func TestAuthTokLive(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	foo := "foo/bar"
	baz := "baz/bar"

	fw := bucket.Object(foo).NewWriter(ctx)
	io.Copy(fw, io.LimitReader(zReader{}, 1e5))
	if err := fw.Close(); err != nil {
		t.Fatal(err)
	}

	bw := bucket.Object(baz).NewWriter(ctx)
	io.Copy(bw, io.LimitReader(zReader{}, 1e5))
	if err := bw.Close(); err != nil {
		t.Fatal(err)
	}

	tok, err := bucket.AuthToken(ctx, "foo", time.Hour)
	if err != nil {
		t.Fatal(err)
	}

	furl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(foo).URL(), tok)
	frsp, err := http.Get(furl)
	if err != nil {
		t.Fatal(err)
	}
	if frsp.StatusCode != 200 {
		t.Fatalf("%s: got %s, want 200", furl, frsp.Status)
	}
	burl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(baz).URL(), tok)
	brsp, err := http.Get(burl)
	if err != nil {
		t.Fatal(err)
	}
	if brsp.StatusCode != 401 {
		t.Fatalf("%s: got %s, want 401", burl, brsp.Status)
	}
}

func TestRangeReaderLive(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	buf := &bytes.Buffer{}
	io.Copy(buf, io.LimitReader(zReader{}, 3e6))
	rs := bytes.NewReader(buf.Bytes())

	w := bucket.Object("foobar").NewWriter(ctx)
	if _, err := io.Copy(w, rs); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	table := []struct {
		offset, length int64
		size           int64 // expected actual size
	}{
		{
			offset: 1e6 - 50,
			length: 1e6 + 50,
			size:   1e6 + 50,
		},
		{
			offset: 0,
			length: -1,
			size:   3e6,
		},
		{
			offset: 2e6,
			length: -1,
			size:   1e6,
		},
		{
			offset: 2e6,
			length: 2e6,
			size:   1e6,
		},
		{
			offset: 0,
			length: 4e6,
			size:   3e6,
		},
	}

	for _, e := range table {
		if _, err := rs.Seek(e.offset, 0); err != nil {
			t.Error(err)
			continue
		}
		hw := sha1.New()
		var lr io.Reader
		lr = rs
		if e.length >= 0 {
			lr = io.LimitReader(rs, e.length)
		}
		if _, err := io.Copy(hw, lr); err != nil {
			t.Error(err)
			continue
		}
		r := bucket.Object("foobar").NewRangeReader(ctx, e.offset, e.length)
		defer r.Close()
		hr := sha1.New()
		read, err := io.Copy(hr, r)
		if err != nil {
			t.Error(err)
			continue
		}
		if read != e.size {
			t.Errorf("NewRangeReader(_, %d, %d): read %d bytes, wanted %d bytes", e.offset, e.length, read, e.size)
		}
		got := fmt.Sprintf("%x", hr.Sum(nil))
		want := fmt.Sprintf("%x", hw.Sum(nil))
		if got != want {
			t.Errorf("NewRangeReader(_, %d, %d): got %q, want %q", e.offset, e.length, got, want)
		}
	}
}

func TestListObjectsWithPrefix(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	foo := "foo/bar"
	baz := "baz/bar"

	fw := bucket.Object(foo).NewWriter(ctx)
	io.Copy(fw, io.LimitReader(zReader{}, 1e5))
	if err := fw.Close(); err != nil {
		t.Fatal(err)
	}

	bw := bucket.Object(baz).NewWriter(ctx)
	io.Copy(bw, io.LimitReader(zReader{}, 1e5))
	if err := bw.Close(); err != nil {
		t.Fatal(err)
	}

	// This is kind of a hack, but
	type lfun func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)

	for _, f := range []lfun{bucket.ListObjects, bucket.ListCurrentObjects} {
		c := &Cursor{
			Prefix: "baz/",
		}
		var res []string
		for {
			objs, cur, err := f(ctx, 10, c)
			if err != nil && err != io.EOF {
				t.Fatalf("bucket.ListObjects: %v", err)
			}
			for _, o := range objs {
				attrs, err := o.Attrs(ctx)
				if err != nil {
					t.Errorf("(%v).Attrs: %v", o, err)
					continue
				}
				res = append(res, attrs.Name)
			}
			if err == io.EOF {
				break
			}
			c = cur
		}

		want := []string{"baz/bar"}
		if !reflect.DeepEqual(res, want) {
			t.Errorf("got %v, want %v", res, want)
		}
	}
}

func compare(a, b *BucketAttrs) bool {
	if a == nil {
		a = &BucketAttrs{}
	}
	if b == nil {
		b = &BucketAttrs{}
	}

	if a.Type != b.Type && !((a.Type == "" && b.Type == Private) || (a.Type == Private && b.Type == "")) {
		return false
	}

	if !reflect.DeepEqual(a.Info, b.Info) && (len(a.Info) > 0 || len(b.Info) > 0) {
		return false
	}

	return reflect.DeepEqual(a.LifecycleRules, b.LifecycleRules)
}

func TestNewBucket(t *testing.T) {
	id := os.Getenv(apiID)
	key := os.Getenv(apiKey)
	if id == "" || key == "" {
		t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
	}
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	client, err := NewClient(ctx, id, key)
	if err != nil {
		t.Fatal(err)
	}

	table := []struct {
		name  string
		attrs *BucketAttrs
	}{
		{
			name: "no-attrs",
		},
		{
			name: "only-rules",
			attrs: &BucketAttrs{
				LifecycleRules: []LifecycleRule{
					{
						Prefix:                 "whee/",
						DaysHiddenUntilDeleted: 30,
					},
					{
						Prefix:             "whoa/",
						DaysNewUntilHidden: 1,
					},
				},
			},
		},
		{
			name: "only-info",
			attrs: &BucketAttrs{
				Info: map[string]string{
					"this":  "that",
					"other": "thing",
				},
			},
		},
	}

	for _, ent := range table {
		bucket, err := client.NewBucket(ctx, id+"-"+ent.name, ent.attrs)
		if err != nil {
			t.Errorf("%s: NewBucket(%v): %v", ent.name, ent.attrs, err)
			continue
		}
		defer bucket.Delete(ctx)
		if err := bucket.Update(ctx, nil); err != nil {
			t.Errorf("%s: Update(ctx, nil): %v", ent.name, err)
			continue
		}
		attrs, err := bucket.Attrs(ctx)
		if err != nil {
			t.Errorf("%s: Attrs(ctx): %v", ent.name, err)
			continue
		}
		if !compare(attrs, ent.attrs) {
			t.Errorf("%s: attrs disagree: got %v, want %v", ent.name, attrs, ent.attrs)
		}
	}
}

func TestDuelingBuckets(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	bucket, done := startLiveTest(ctx, t)
	defer done()
	bucket2, done2 := startLiveTest(ctx, t)
	defer done2()

	attrs, err := bucket.Attrs(ctx)
	if err != nil {
		t.Fatal(err)
	}
	attrs2, err := bucket2.Attrs(ctx)
	if err != nil {
		t.Fatal(err)
	}
	attrs.Info["food"] = "yum"
	if err := bucket.Update(ctx, attrs); err != nil {
		t.Fatal(err)
	}

	attrs2.Info["nails"] = "not"
	if err := bucket2.Update(ctx, attrs2); !IsUpdateConflict(err) {
		t.Fatalf("bucket.Update should have failed with IsUpdateConflict; instead failed with %v", err)
	}

	attrs2, err = bucket2.Attrs(ctx)
	if err != nil {
		t.Fatal(err)
	}
	attrs2.Info["nails"] = "not"
	if err := bucket2.Update(ctx, nil); err != nil {
		t.Fatal(err)
	}
	if err := bucket2.Update(ctx, attrs2); err != nil {
		t.Fatal(err)
	}
}

func TestNotExist(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	bucket, done := startLiveTest(ctx, t)
	defer done()

	if _, err := bucket.Object("not there").Attrs(ctx); !IsNotExist(err) {
		t.Errorf("IsNotExist() on nonexistent object returned false (%v)", err)
	}
}

func TestWriteEmpty(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	bucket, done := startLiveTest(ctx, t)
	defer done()

	_, _, err := writeFile(ctx, bucket, smallFileName, 0, 1e8)
	if err != nil {
		t.Fatal(err)
	}
}

type rtCounter struct {
	rt    http.RoundTripper
	trips int
	sync.Mutex
}

func (rt *rtCounter) RoundTrip(r *http.Request) (*http.Response, error) {
	rt.Lock()
	defer rt.Unlock()
	rt.trips++
	return rt.rt.RoundTrip(r)
}

func TestAttrsNoRoundtrip(t *testing.T) {
	rt := &rtCounter{rt: transport}
	transport = rt
	defer func() {
		transport = rt.rt
	}()

	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	bucket, done := startLiveTest(ctx, t)
	defer done()

	_, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
	if err != nil {
		t.Fatal(err)
	}

	objs, _, err := bucket.ListObjects(ctx, 1, nil)
	if err != nil {
		t.Fatal(err)
	}
	if len(objs) != 1 {
		t.Fatalf("unexpected objects: got %d, want 1", len(objs))
	}

	trips := rt.trips
	attrs, err := objs[0].Attrs(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if attrs.Name != smallFileName {
		t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName)
	}

	if trips != rt.trips {
		t.Errorf("Attrs() should not have caused any net traffic, but it did: old %d, new %d", trips, rt.trips)
	}
}

func TestDeleteWithoutName(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	bucket, done := startLiveTest(ctx, t)
	defer done()

	_, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
	if err != nil {
		t.Fatal(err)
	}

	if err := bucket.Object(smallFileName).Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

type object struct {
	o   *Object
	err error
}

func countObjects(ctx context.Context, f func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)) (int, error) {
	var got int
	ch := listObjects(ctx, f)
	for c := range ch {
		if c.err != nil {
			return 0, c.err
		}
		got++
	}
	return got, nil
}
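// Note (added commentary, not part of the upstream file): listObjects
// drives the paginated list API from a goroutine and streams each object
// (or a single terminal error) over a channel, closing it at io.EOF;
// countObjects above simply drains that channel.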
func listObjects(ctx context.Context, f func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)) <-chan object {
	ch := make(chan object)
	go func() {
		defer close(ch)
		var cur *Cursor
		for {
			objs, c, err := f(ctx, 100, cur)
			if err != nil && err != io.EOF {
				ch <- object{err: err}
				return
			}
			for _, o := range objs {
				ch <- object{o: o}
			}
			if err == io.EOF {
				return
			}
			cur = c
		}
	}()
	return ch
}

var transport = http.DefaultTransport
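// Note (added commentary, not part of the upstream file): startLiveTest is
// the shared fixture for the live tests. It skips the test unless the
// B2_ACCOUNT_ID and B2_SECRET_KEY environment variables are set, creates a
// per-account test bucket with fault injection enabled (FailSomeUploads,
// ExpireSomeAuthTokens), and returns a cleanup func that deletes every
// object and then the bucket itself.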
func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) {
	id := os.Getenv(apiID)
	key := os.Getenv(apiKey)
	if id == "" || key == "" {
		t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
		return nil, nil
	}
	client, err := NewClient(ctx, id, key, FailSomeUploads(), ExpireSomeAuthTokens(), Transport(transport))
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	bucket, err := client.NewBucket(ctx, id+"-"+bucketName, nil)
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	f := func() {
		for c := range listObjects(ctx, bucket.ListObjects) {
			if c.err != nil {
				continue
			}
			if err := c.o.Delete(ctx); err != nil {
				t.Error(err)
			}
		}
		if err := bucket.Delete(ctx); err != nil && !IsNotExist(err) {
			t.Error(err)
		}
	}
	return bucket, f
}

102 vendor/github.com/kurin/blazer/b2/monitor.go generated vendored Normal file
@@ -0,0 +1,102 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import "fmt"

// StatusInfo reports information about a client.
type StatusInfo struct {
	Writers map[string]*WriterStatus
	Readers map[string]*ReaderStatus
}

// WriterStatus reports the status for each writer.
type WriterStatus struct {
	// Progress is a slice of completion ratios. The index of a ratio is its
	// chunk id less one.
	Progress []float64
}

// ReaderStatus reports the status for each reader.
type ReaderStatus struct {
	// Progress is a slice of completion ratios. The index of a ratio is its
	// chunk id less one.
	Progress []float64
}

// Status returns information about the current state of the client.
func (c *Client) Status() *StatusInfo {
	c.slock.Lock()
	defer c.slock.Unlock()

	si := &StatusInfo{
		Writers: make(map[string]*WriterStatus),
		Readers: make(map[string]*ReaderStatus),
	}

	for name, w := range c.sWriters {
		si.Writers[name] = w.status()
	}

	for name, r := range c.sReaders {
		si.Readers[name] = r.status()
	}

	return si
}
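// Note (added commentary, not part of the upstream file): active transfers
// are registered below under the key "<bucket>/<object>", so a Status()
// snapshot can be matched back to the object a Writer or Reader is
// working on.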
func (c *Client) addWriter(w *Writer) {
|
||||
c.slock.Lock()
|
||||
defer c.slock.Unlock()
|
||||
|
||||
if c.sWriters == nil {
|
||||
c.sWriters = make(map[string]*Writer)
|
||||
}
|
||||
|
||||
c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w
|
||||
}
|
||||
|
||||
func (c *Client) removeWriter(w *Writer) {
|
||||
c.slock.Lock()
|
||||
defer c.slock.Unlock()
|
||||
|
||||
if c.sWriters == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name))
|
||||
}
|
||||
|
||||
func (c *Client) addReader(r *Reader) {
|
||||
c.slock.Lock()
|
||||
defer c.slock.Unlock()
|
||||
|
||||
if c.sReaders == nil {
|
||||
c.sReaders = make(map[string]*Reader)
|
||||
}
|
||||
|
||||
c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r
|
||||
}
|
||||
|
||||
func (c *Client) removeReader(r *Reader) {
|
||||
c.slock.Lock()
|
||||
defer c.slock.Unlock()
|
||||
|
||||
if c.sReaders == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name))
|
||||
}
|
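Status is the only entry point in this file a caller touches directly; the add/remove functions are bookkeeping invoked by Reader and Writer. A minimal polling sketch, not part of the vendored code: the reportProgress helper and its one-second interval are assumptions, and it presumes imports of fmt, time, and github.com/kurin/blazer/b2.

// reportProgress is a hypothetical helper that polls Status once a second
// and prints per-chunk completion ratios for every live transfer.
func reportProgress(client *b2.Client) {
	for range time.Tick(time.Second) {
		si := client.Status()
		for name, ws := range si.Writers {
			fmt.Printf("upload %s: %v\n", name, ws.Progress)
		}
		for name, rs := range si.Readers {
			fmt.Printf("download %s: %v\n", name, rs.Progress)
		}
	}
}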
300
vendor/github.com/kurin/blazer/b2/reader.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"bytes"
	"errors"
	"io"
	"sync"

	"github.com/kurin/blazer/internal/blog"

	"golang.org/x/net/context"
)

var errNoMoreContent = errors.New("416: out of content")

// Reader reads files from B2.
type Reader struct {
	// ConcurrentDownloads is the number of simultaneous downloads to pull from
	// B2. Values greater than one will cause B2 to make multiple HTTP requests
	// for a given file, increasing available bandwidth at the cost of buffering
	// the downloads in memory.
	ConcurrentDownloads int

	// ChunkSize is the size to fetch per ConcurrentDownload. The default is
	// 10MB.
	ChunkSize int

	ctx    context.Context
	cancel context.CancelFunc // cancels ctx
	o      *Object
	name   string
	offset int64 // the start of the file
	length int64 // the length to read, or -1
	csize  int   // chunk size
	read   int   // amount read
	chwid  int   // chunks written
	chrid  int   // chunks read
	chbuf  chan *rchunk
	init   sync.Once
	rmux   sync.Mutex // guards rcond
	rcond  *sync.Cond
	chunks map[int]*rchunk

	emux sync.RWMutex // guards err, believe it or not
	err  error

	smux sync.Mutex
	smap map[int]*meteredReader
}

type rchunk struct {
	bytes.Buffer
	final bool
}

// Close frees resources associated with the download.
func (r *Reader) Close() error {
	r.cancel()
	r.o.b.c.removeReader(r)
	return nil
}

func (r *Reader) setErr(err error) {
	r.emux.Lock()
	defer r.emux.Unlock()
	if r.err == nil {
		r.err = err
		r.cancel()
	}
}

func (r *Reader) setErrNoCancel(err error) {
	r.emux.Lock()
	defer r.emux.Unlock()
	if r.err == nil {
		r.err = err
	}
}

func (r *Reader) getErr() error {
	r.emux.RLock()
	defer r.emux.RUnlock()
	return r.err
}

func (r *Reader) thread() {
	go func() {
		for {
			var buf *rchunk
			select {
			case b, ok := <-r.chbuf:
				if !ok {
					return
				}
				buf = b
			case <-r.ctx.Done():
				return
			}
			r.rmux.Lock()
			chunkID := r.chwid
			r.chwid++
			r.rmux.Unlock()
			offset := int64(chunkID*r.csize) + r.offset
			size := int64(r.csize)
			if r.length > 0 {
				if size > r.length {
					buf.final = true
					size = r.length
				}
				r.length -= size
			}
		redo:
			fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size)
			if err == errNoMoreContent {
				// this read generated a 416 so we are entirely past the end of the object
				buf.final = true
				r.rmux.Lock()
				r.chunks[chunkID] = buf
				r.rmux.Unlock()
				r.rcond.Broadcast()
				return
			}
			if err != nil {
				r.setErr(err)
				r.rcond.Broadcast()
				return
			}
			rsize, _, _, _ := fr.stats()
			mr := &meteredReader{r: &fakeSeeker{fr}, size: int(rsize)}
			r.smux.Lock()
			r.smap[chunkID] = mr
			r.smux.Unlock()
			i, err := copyContext(r.ctx, buf, mr)
			r.smux.Lock()
			r.smap[chunkID] = nil
			r.smux.Unlock()
			if i < int64(rsize) || err == io.ErrUnexpectedEOF {
				// Probably the network connection was closed early. Retry.
				blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying", chunkID, i, rsize)
				buf.Reset()
				goto redo
			}
			if err != nil {
				r.setErr(err)
				r.rcond.Broadcast()
				return
			}
			r.rmux.Lock()
			r.chunks[chunkID] = buf
			r.rmux.Unlock()
			r.rcond.Broadcast()
		}
	}()
}

func (r *Reader) curChunk() (*rchunk, error) {
	ch := make(chan *rchunk)
	go func() {
		r.rmux.Lock()
		defer r.rmux.Unlock()
		for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil {
			r.rcond.Wait()
		}
		select {
		case ch <- r.chunks[r.chrid]:
		case <-r.ctx.Done():
			return
		}
	}()
	select {
	case buf := <-ch:
		return buf, r.getErr()
	case <-r.ctx.Done():
		if r.getErr() != nil {
			return nil, r.getErr()
		}
		return nil, r.ctx.Err()
	}
}

func (r *Reader) initFunc() {
	r.smux.Lock()
	r.smap = make(map[int]*meteredReader)
	r.smux.Unlock()
	r.o.b.c.addReader(r)
	r.rcond = sync.NewCond(&r.rmux)
	cr := r.ConcurrentDownloads
	if cr < 1 {
		cr = 1
	}
	if r.ChunkSize < 1 {
		r.ChunkSize = 1e7
	}
	r.csize = r.ChunkSize
	r.chbuf = make(chan *rchunk, cr)
	for i := 0; i < cr; i++ {
		r.thread()
		r.chbuf <- &rchunk{}
	}
}

func (r *Reader) Read(p []byte) (int, error) {
	if err := r.getErr(); err != nil {
		return 0, err
	}
	// TODO: check the SHA1 hash here and verify it on Close.
	r.init.Do(r.initFunc)
	chunk, err := r.curChunk()
	if err != nil {
		r.setErrNoCancel(err)
		return 0, err
	}
	n, err := chunk.Read(p)
	r.read += n
	if err == io.EOF {
		if chunk.final {
			close(r.chbuf)
			r.setErrNoCancel(err)
			return n, err
		}
		r.chrid++
		chunk.Reset()
		r.chbuf <- chunk
		err = nil
	}
	r.setErrNoCancel(err)
	return n, err
}

func (r *Reader) status() *ReaderStatus {
	r.smux.Lock()
	defer r.smux.Unlock()

	rs := &ReaderStatus{
		Progress: make([]float64, len(r.smap)),
	}

	for i := 1; i <= len(r.smap); i++ {
		rs.Progress[i-1] = r.smap[i].done()
	}

	return rs
}

// copied from io.Copy, basically.
func copyContext(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
	buf := make([]byte, 32*1024)
	for {
		if ctx.Err() != nil {
			err = ctx.Err()
			return
		}
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}

// fakeSeeker exists so that we can wrap the http response body (an io.Reader
// but not an io.Seeker) into a meteredReader, which will allow us to keep tabs
// on how much of the chunk we've read so far.
type fakeSeeker struct {
	io.Reader
}

func (fs *fakeSeeker) Seek(int64, int) (int64, error) { return 0, nil }
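Because initFunc runs under sync.Once on the first Read, ConcurrentDownloads and ChunkSize only take effect if set before reading begins. A download sketch under that assumption; the bucket variable, object name, and dst writer are hypothetical, and NewReader is the constructor used by the examples/simple program below.

// Hypothetical download; bucket is a *b2.Bucket, dst an io.Writer.
r := bucket.Object("data.bin").NewReader(ctx)
r.ConcurrentDownloads = 4 // four HTTP range requests in flight
r.ChunkSize = 5e6         // 5 MB per chunk instead of the 10 MB default
if _, err := io.Copy(dst, r); err != nil {
	log.Fatal(err)
}
r.Close() // frees the chunk buffers and deregisters the reader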
451
vendor/github.com/kurin/blazer/b2/writer.go
generated
vendored
Normal file
@ -0,0 +1,451 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"sync/atomic"
	"time"

	"github.com/kurin/blazer/internal/blog"

	"golang.org/x/net/context"
)

// Writer writes data into Backblaze. It automatically switches to the large
// file API if the file exceeds ChunkSize bytes. Due to that and other
// Backblaze API details, there is a large buffer.
//
// Changes to public Writer attributes must be made before the first call to
// Write.
type Writer struct {
	// ConcurrentUploads is the number of different threads sending data
	// concurrently to Backblaze for large files. This can increase performance
	// greatly, as each thread will hit a different endpoint. However, there is
	// a ChunkSize buffer for each thread. Values less than 1 are equivalent
	// to 1.
	ConcurrentUploads int

	// Resume an upload. If true, and the upload is a large file, and a file of
	// the same name was started but not finished, then assume that we are
	// resuming that file, and don't upload duplicate chunks.
	Resume bool

	// ChunkSize is the size, in bytes, of each individual part, when writing
	// large files, and also when determining whether to upload a file normally
	// or when to split it into parts. The default is 100M (1e8). The minimum
	// is 5M (5e6); values less than this are not an error, but will fail. The
	// maximum is 5GB (5e9).
	ChunkSize int

	// UseFileBuffer controls whether to use an in-memory buffer (the default) or
	// scratch space on the file system. If this is true, b2 will save chunks in
	// FileBufferDir.
	UseFileBuffer bool

	// FileBufferDir specifies the directory where scratch files are kept. If
	// blank, os.TempDir() is used.
	FileBufferDir string

	contentType string
	info        map[string]string

	csize       int
	ctx         context.Context
	cancel      context.CancelFunc
	ready       chan chunk
	wg          sync.WaitGroup
	start       sync.Once
	once        sync.Once
	done        sync.Once
	file        beLargeFileInterface
	seen        map[int]string
	everStarted bool

	o    *Object
	name string

	cidx int
	w    writeBuffer

	emux sync.RWMutex
	err  error

	smux sync.RWMutex
	smap map[int]*meteredReader
}

type chunk struct {
	id  int
	buf writeBuffer
}

func (w *Writer) getBuffer() (writeBuffer, error) {
	if !w.UseFileBuffer {
		return newMemoryBuffer(), nil
	}
	return newFileBuffer(w.FileBufferDir)
}

func (w *Writer) setErr(err error) {
	if err == nil {
		return
	}
	w.emux.Lock()
	defer w.emux.Unlock()
	if w.err == nil {
		blog.V(1).Infof("error writing %s: %v", w.name, err)
		w.err = err
		w.cancel()
	}
}

func (w *Writer) getErr() error {
	w.emux.RLock()
	defer w.emux.RUnlock()
	return w.err
}

func (w *Writer) registerChunk(id int, r *meteredReader) {
	w.smux.Lock()
	w.smap[id] = r
	w.smux.Unlock()
}

func (w *Writer) completeChunk(id int) {
	w.smux.Lock()
	w.smap[id] = nil
	w.smux.Unlock()
}

var gid int32

func (w *Writer) thread() {
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		id := atomic.AddInt32(&gid, 1)
		fc, err := w.file.getUploadPartURL(w.ctx)
		if err != nil {
			w.setErr(err)
			return
		}
		for {
			chunk, ok := <-w.ready
			if !ok {
				return
			}
			if sha, ok := w.seen[chunk.id]; ok {
				if sha != chunk.buf.Hash() {
					w.setErr(errors.New("resumable upload was requested, but chunks don't match!"))
					return
				}
				chunk.buf.Close()
				w.completeChunk(chunk.id)
				blog.V(2).Infof("skipping chunk %d", chunk.id)
				continue
			}
			blog.V(2).Infof("thread %d handling chunk %d", id, chunk.id)
			r, err := chunk.buf.Reader()
			if err != nil {
				w.setErr(err)
				return
			}
			mr := &meteredReader{r: r, size: chunk.buf.Len()}
			w.registerChunk(chunk.id, mr)
			sleep := time.Millisecond * 15
		redo:
			n, err := fc.uploadPart(w.ctx, mr, chunk.buf.Hash(), chunk.buf.Len(), chunk.id)
			if n != chunk.buf.Len() || err != nil {
				if w.o.b.r.reupload(err) {
					time.Sleep(sleep)
					sleep *= 2
					if sleep > time.Second*15 {
						sleep = time.Second * 15
					}
					blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, chunk.buf.Len(), err)
					f, err := w.file.getUploadPartURL(w.ctx)
					if err != nil {
						w.setErr(err)
						w.completeChunk(chunk.id)
						chunk.buf.Close() // TODO: log error
						return
					}
					fc = f
					goto redo
				}
				w.setErr(err)
				w.completeChunk(chunk.id)
				chunk.buf.Close() // TODO: log error
				return
			}
			w.completeChunk(chunk.id)
			chunk.buf.Close() // TODO: log error
			blog.V(2).Infof("chunk %d handled", chunk.id)
		}
	}()
}

// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (int, error) {
	w.start.Do(func() {
		w.everStarted = true
		w.smux.Lock()
		w.smap = make(map[int]*meteredReader)
		w.smux.Unlock()
		w.o.b.c.addWriter(w)
		w.csize = w.ChunkSize
		if w.csize == 0 {
			w.csize = 1e8
		}
		v, err := w.getBuffer()
		if err != nil {
			w.setErr(err)
			return
		}
		w.w = v
	})
	if err := w.getErr(); err != nil {
		return 0, err
	}
	left := w.csize - w.w.Len()
	if len(p) < left {
		return w.w.Write(p)
	}
	i, err := w.w.Write(p[:left])
	if err != nil {
		w.setErr(err)
		return i, err
	}
	if err := w.sendChunk(); err != nil {
		w.setErr(err)
		return i, w.getErr()
	}
	k, err := w.Write(p[left:])
	if err != nil {
		w.setErr(err)
	}
	return i + k, err
}

func (w *Writer) simpleWriteFile() error {
	ue, err := w.o.b.b.getUploadURL(w.ctx)
	if err != nil {
		return err
	}
	sha1 := w.w.Hash()
	ctype := w.contentType
	if ctype == "" {
		ctype = "application/octet-stream"
	}
	r, err := w.w.Reader()
	if err != nil {
		return err
	}
	mr := &meteredReader{r: r, size: w.w.Len()}
	w.registerChunk(1, mr)
	defer w.completeChunk(1)
redo:
	f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info)
	if err != nil {
		if w.o.b.r.reupload(err) {
			blog.V(1).Infof("b2 writer: %v; retrying", err)
			u, err := w.o.b.b.getUploadURL(w.ctx)
			if err != nil {
				return err
			}
			ue = u
			goto redo
		}
		return err
	}
	w.o.f = f
	return nil
}

func (w *Writer) getLargeFile() (beLargeFileInterface, error) {
	if !w.Resume {
		ctype := w.contentType
		if ctype == "" {
			ctype = "application/octet-stream"
		}
		return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info)
	}
	next := 1
	seen := make(map[int]string)
	var size int64
	var fi beFileInterface
	for {
		cur := &Cursor{name: w.name}
		objs, _, err := w.o.b.ListObjects(w.ctx, 1, cur)
		if err != nil {
			return nil, err
		}
		if len(objs) < 1 || objs[0].name != w.name {
			w.Resume = false
			return w.getLargeFile()
		}
		fi = objs[0].f
		parts, n, err := fi.listParts(w.ctx, next, 100)
		if err != nil {
			return nil, err
		}
		next = n
		for _, p := range parts {
			seen[p.number()] = p.sha1()
			size += p.size()
		}
		if len(parts) == 0 {
			break
		}
		if next == 0 {
			break
		}
	}
	w.seen = make(map[int]string) // copy the map
	for id, sha := range seen {
		w.seen[id] = sha
	}
	return fi.compileParts(size, seen), nil
}

func (w *Writer) sendChunk() error {
	var err error
	w.once.Do(func() {
		lf, e := w.getLargeFile()
		if e != nil {
			err = e
			return
		}
		w.file = lf
		w.ready = make(chan chunk)
		if w.ConcurrentUploads < 1 {
			w.ConcurrentUploads = 1
		}
		for i := 0; i < w.ConcurrentUploads; i++ {
			w.thread()
		}
	})
	if err != nil {
		return err
	}
	select {
	case w.ready <- chunk{
		id:  w.cidx + 1,
		buf: w.w,
	}:
	case <-w.ctx.Done():
		return w.ctx.Err()
	}
	w.cidx++
	v, err := w.getBuffer()
	if err != nil {
		return err
	}
	w.w = v
	return nil
}

// Close satisfies the io.Closer interface. It is critical to check the return
// value of Close on all writers.
func (w *Writer) Close() error {
	w.done.Do(func() {
		if !w.everStarted {
			return
		}
		defer w.o.b.c.removeWriter(w)
		defer w.w.Close() // TODO: log error
		if w.cidx == 0 {
			w.setErr(w.simpleWriteFile())
			return
		}
		if w.w.Len() > 0 {
			if err := w.sendChunk(); err != nil {
				w.setErr(err)
				return
			}
		}
		close(w.ready)
		w.wg.Wait()
		f, err := w.file.finishLargeFile(w.ctx)
		if err != nil {
			w.setErr(err)
			return
		}
		w.o.f = f
	})
	return w.getErr()
}

// WithAttrs sets the writable attributes of the resulting file to given
// values. WithAttrs must be called before the first call to Write.
func (w *Writer) WithAttrs(attrs *Attrs) *Writer {
	w.contentType = attrs.ContentType
	w.info = make(map[string]string)
	for k, v := range attrs.Info {
		w.info[k] = v
	}
	if len(w.info) < 10 && !attrs.LastModified.IsZero() {
		w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6)
	}
	return w
}

func (w *Writer) status() *WriterStatus {
	w.smux.RLock()
	defer w.smux.RUnlock()

	ws := &WriterStatus{
		Progress: make([]float64, len(w.smap)),
	}

	for i := 1; i <= len(w.smap); i++ {
		ws.Progress[i-1] = w.smap[i].done()
	}

	return ws
}

type meteredReader struct {
	read int64
	size int
	r    io.ReadSeeker
	mux  sync.Mutex
}

func (mr *meteredReader) Read(p []byte) (int, error) {
	mr.mux.Lock()
	defer mr.mux.Unlock()
	n, err := mr.r.Read(p)
	mr.read += int64(n)
	return n, err
}

func (mr *meteredReader) Seek(offset int64, whence int) (int64, error) {
	mr.mux.Lock()
	defer mr.mux.Unlock()
	mr.read = offset
	return mr.r.Seek(offset, whence)
}

func (mr *meteredReader) done() float64 {
	if mr == nil {
		return 1
	}
	read := float64(atomic.LoadInt64(&mr.read))
	return read / float64(mr.size)
}
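Per the Writer doc comment, all public attributes must be set before the first Write, and Close's error must always be checked since it is what finishes a large-file upload. An upload sketch under those assumptions; the bucket variable, object name, and src reader are hypothetical, and NewWriter is the constructor used by the examples/simple program below.

// Hypothetical upload; bucket is a *b2.Bucket, src an io.Reader.
w := bucket.Object("backup.tar").NewWriter(ctx)
w.ConcurrentUploads = 4 // four upload threads, each buffering ChunkSize bytes
w.ChunkSize = 5e6       // the documented 5M minimum; default is 1e8
if _, err := io.Copy(w, src); err != nil {
	log.Fatal(err)
}
// Close finishes the (possibly large-file) upload; its error must be checked.
if err := w.Close(); err != nil {
	log.Fatal(err)
}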
1092
vendor/github.com/kurin/blazer/base/base.go
generated
vendored
Normal file
File diff suppressed because it is too large
418
vendor/github.com/kurin/blazer/base/integration_test.go
generated
vendored
Normal file
@ -0,0 +1,418 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package base

import (
	"bytes"
	"crypto/sha1"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"
)

const (
	apiID  = "B2_ACCOUNT_ID"
	apiKey = "B2_SECRET_KEY"
)

const (
	bucketName    = "base-tests"
	smallFileName = "TeenyTiny"
	largeFileName = "BigBytes"
)

type zReader struct{}

func (zReader) Read(p []byte) (int, error) {
	return len(p), nil
}

func TestStorage(t *testing.T) {
	id := os.Getenv(apiID)
	key := os.Getenv(apiKey)
	if id == "" || key == "" {
		t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
	}
	ctx := context.Background()

	// b2_authorize_account
	b2, err := AuthorizeAccount(ctx, id, key)
	if err != nil {
		t.Fatal(err)
	}

	// b2_create_bucket
	infoKey := "key"
	infoVal := "val"
	m := map[string]string{infoKey: infoVal}
	rules := []LifecycleRule{
		{
			Prefix:             "what/",
			DaysNewUntilHidden: 5,
		},
	}
	bname := id + "-" + bucketName
	bucket, err := b2.CreateBucket(ctx, bname, "", m, rules)
	if err != nil {
		t.Fatal(err)
	}
	if bucket.Info[infoKey] != infoVal {
		t.Errorf("%s: bucketInfo[%q] got %q, want %q", bucket.Name, infoKey, bucket.Info[infoKey], infoVal)
	}
	if len(bucket.LifecycleRules) != 1 {
		t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules))
	}

	defer func() {
		// b2_delete_bucket
		if err := bucket.DeleteBucket(ctx); err != nil {
			t.Error(err)
		}
	}()

	// b2_update_bucket
	bucket.Info["new"] = "yay"
	bucket.LifecycleRules = nil // Unset options should be a noop.
	newBucket, err := bucket.Update(ctx)
	if err != nil {
		t.Errorf("%s: update bucket: %v", bucket.Name, err)
		return
	}
	bucket = newBucket
	if bucket.Info["new"] != "yay" {
		t.Errorf("%s: info key \"new\": got %s, want \"yay\"", bucket.Name, bucket.Info["new"])
	}
	if len(bucket.LifecycleRules) != 1 {
		t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules))
	}

	// b2_list_buckets
	buckets, err := b2.ListBuckets(ctx)
	if err != nil {
		t.Fatal(err)
	}
	var found bool
	for _, bucket := range buckets {
		if bucket.Name == bname {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("%s: new bucket not found", bname)
	}

	// b2_get_upload_url
	ue, err := bucket.GetUploadURL(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// b2_upload_file
	smallFile := io.LimitReader(zReader{}, 1024*50) // 50k
	hash := sha1.New()
	buf := &bytes.Buffer{}
	w := io.MultiWriter(hash, buf)
	if _, err := io.Copy(w, smallFile); err != nil {
		t.Error(err)
	}
	smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil))
	smallInfoMap := map[string]string{
		"one": "1",
		"two": "2",
	}
	file, err := ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, smallInfoMap)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		// b2_delete_file_version
		if err := file.DeleteFileVersion(ctx); err != nil {
			t.Error(err)
		}
	}()

	// b2_start_large_file
	largeInfoMap := map[string]string{
		"one_BILLION":  "1e9",
		"two_TRILLION": "2eSomething, I guess 2e12",
	}
	lf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", largeInfoMap)
	if err != nil {
		t.Fatal(err)
	}

	// b2_get_upload_part_url
	fc, err := lf.GetUploadPartURL(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// b2_upload_part
	largeFile := io.LimitReader(zReader{}, 10e6) // 10M
	for i := 0; i < 2; i++ {
		r := io.LimitReader(largeFile, 5e6) // 5M
		hash := sha1.New()
		buf := &bytes.Buffer{}
		w := io.MultiWriter(hash, buf)
		if _, err := io.Copy(w, r); err != nil {
			t.Error(err)
		}
		if _, err := fc.UploadPart(ctx, buf, fmt.Sprintf("%x", hash.Sum(nil)), buf.Len(), i+1); err != nil {
			t.Error(err)
		}
	}

	// b2_finish_large_file
	lfile, err := lf.FinishLargeFile(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// b2_get_file_info
	smallInfo, err := file.GetFileInfo(ctx)
	if err != nil {
		t.Fatal(err)
	}
	compareFileAndInfo(t, smallInfo, smallFileName, smallSHA1, smallInfoMap)
	largeInfo, err := lfile.GetFileInfo(ctx)
	if err != nil {
		t.Fatal(err)
	}
	compareFileAndInfo(t, largeInfo, largeFileName, "none", largeInfoMap)

	defer func() {
		if err := lfile.DeleteFileVersion(ctx); err != nil {
			t.Error(err)
		}
	}()

	clf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", nil)
	if err != nil {
		t.Fatal(err)
	}

	// b2_cancel_large_file
	if err := clf.CancelLargeFile(ctx); err != nil {
		t.Fatal(err)
	}

	// b2_list_file_names
	files, _, err := bucket.ListFileNames(ctx, 100, "", "", "")
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != 2 {
		t.Errorf("expected 2 files, got %d: %v", len(files), files)
	}

	// b2_download_file_by_name
	fr, err := bucket.DownloadFileByName(ctx, smallFileName, 0, 0)
	if err != nil {
		t.Fatal(err)
	}
	if fr.SHA1 != smallSHA1 {
		t.Errorf("small file SHAs don't match: got %q, want %q", fr.SHA1, smallSHA1)
	}
	lbuf := &bytes.Buffer{}
	if _, err := io.Copy(lbuf, fr); err != nil {
		t.Fatal(err)
	}
	if lbuf.Len() != fr.ContentLength {
		t.Errorf("small file retrieved lengths don't match: got %d, want %d", lbuf.Len(), fr.ContentLength)
	}

	// b2_hide_file
	hf, err := bucket.HideFile(ctx, smallFileName)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := hf.DeleteFileVersion(ctx); err != nil {
			t.Error(err)
		}
	}()

	// b2_list_file_versions
	files, _, _, err = bucket.ListFileVersions(ctx, 100, "", "", "", "")
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != 3 {
		t.Errorf("expected 3 files, got %d: %v", len(files), files)
	}

	// b2_get_download_authorization
	if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour); err != nil {
		t.Errorf("failed to get download auth token: %v", err)
	}
}

func compareFileAndInfo(t *testing.T, info *FileInfo, name, sha1 string, imap map[string]string) {
	if info.Name != name {
		t.Errorf("got %q, want %q", info.Name, name)
	}
	if info.SHA1 != sha1 {
		t.Errorf("got %q, want %q", info.SHA1, sha1)
	}
	if !reflect.DeepEqual(info.Info, imap) {
		t.Errorf("got %v, want %v", info.Info, imap)
	}
}

// from https://www.backblaze.com/b2/docs/string_encoding.html
var testCases = `[
{"fullyEncoded": "%20", "minimallyEncoded": "+", "string": " "},
{"fullyEncoded": "%21", "minimallyEncoded": "!", "string": "!"},
{"fullyEncoded": "%22", "minimallyEncoded": "%22", "string": "\""},
{"fullyEncoded": "%23", "minimallyEncoded": "%23", "string": "#"},
{"fullyEncoded": "%24", "minimallyEncoded": "$", "string": "$"},
{"fullyEncoded": "%25", "minimallyEncoded": "%25", "string": "%"},
{"fullyEncoded": "%26", "minimallyEncoded": "%26", "string": "&"},
{"fullyEncoded": "%27", "minimallyEncoded": "'", "string": "'"},
{"fullyEncoded": "%28", "minimallyEncoded": "(", "string": "("},
{"fullyEncoded": "%29", "minimallyEncoded": ")", "string": ")"},
{"fullyEncoded": "%2A", "minimallyEncoded": "*", "string": "*"},
{"fullyEncoded": "%2B", "minimallyEncoded": "%2B", "string": "+"},
{"fullyEncoded": "%2C", "minimallyEncoded": "%2C", "string": ","},
{"fullyEncoded": "%2D", "minimallyEncoded": "-", "string": "-"},
{"fullyEncoded": "%2E", "minimallyEncoded": ".", "string": "."},
{"fullyEncoded": "/", "minimallyEncoded": "/", "string": "/"},
{"fullyEncoded": "%30", "minimallyEncoded": "0", "string": "0"},
{"fullyEncoded": "%31", "minimallyEncoded": "1", "string": "1"},
{"fullyEncoded": "%32", "minimallyEncoded": "2", "string": "2"},
{"fullyEncoded": "%33", "minimallyEncoded": "3", "string": "3"},
{"fullyEncoded": "%34", "minimallyEncoded": "4", "string": "4"},
{"fullyEncoded": "%35", "minimallyEncoded": "5", "string": "5"},
{"fullyEncoded": "%36", "minimallyEncoded": "6", "string": "6"},
{"fullyEncoded": "%37", "minimallyEncoded": "7", "string": "7"},
{"fullyEncoded": "%38", "minimallyEncoded": "8", "string": "8"},
{"fullyEncoded": "%39", "minimallyEncoded": "9", "string": "9"},
{"fullyEncoded": "%3A", "minimallyEncoded": ":", "string": ":"},
{"fullyEncoded": "%3B", "minimallyEncoded": ";", "string": ";"},
{"fullyEncoded": "%3C", "minimallyEncoded": "%3C", "string": "<"},
{"fullyEncoded": "%3D", "minimallyEncoded": "=", "string": "="},
{"fullyEncoded": "%3E", "minimallyEncoded": "%3E", "string": ">"},
{"fullyEncoded": "%3F", "minimallyEncoded": "%3F", "string": "?"},
{"fullyEncoded": "%40", "minimallyEncoded": "@", "string": "@"},
{"fullyEncoded": "%41", "minimallyEncoded": "A", "string": "A"},
{"fullyEncoded": "%42", "minimallyEncoded": "B", "string": "B"},
{"fullyEncoded": "%43", "minimallyEncoded": "C", "string": "C"},
{"fullyEncoded": "%44", "minimallyEncoded": "D", "string": "D"},
{"fullyEncoded": "%45", "minimallyEncoded": "E", "string": "E"},
{"fullyEncoded": "%46", "minimallyEncoded": "F", "string": "F"},
{"fullyEncoded": "%47", "minimallyEncoded": "G", "string": "G"},
{"fullyEncoded": "%48", "minimallyEncoded": "H", "string": "H"},
{"fullyEncoded": "%49", "minimallyEncoded": "I", "string": "I"},
{"fullyEncoded": "%4A", "minimallyEncoded": "J", "string": "J"},
{"fullyEncoded": "%4B", "minimallyEncoded": "K", "string": "K"},
{"fullyEncoded": "%4C", "minimallyEncoded": "L", "string": "L"},
{"fullyEncoded": "%4D", "minimallyEncoded": "M", "string": "M"},
{"fullyEncoded": "%4E", "minimallyEncoded": "N", "string": "N"},
{"fullyEncoded": "%4F", "minimallyEncoded": "O", "string": "O"},
{"fullyEncoded": "%50", "minimallyEncoded": "P", "string": "P"},
{"fullyEncoded": "%51", "minimallyEncoded": "Q", "string": "Q"},
{"fullyEncoded": "%52", "minimallyEncoded": "R", "string": "R"},
{"fullyEncoded": "%53", "minimallyEncoded": "S", "string": "S"},
{"fullyEncoded": "%54", "minimallyEncoded": "T", "string": "T"},
{"fullyEncoded": "%55", "minimallyEncoded": "U", "string": "U"},
{"fullyEncoded": "%56", "minimallyEncoded": "V", "string": "V"},
{"fullyEncoded": "%57", "minimallyEncoded": "W", "string": "W"},
{"fullyEncoded": "%58", "minimallyEncoded": "X", "string": "X"},
{"fullyEncoded": "%59", "minimallyEncoded": "Y", "string": "Y"},
{"fullyEncoded": "%5A", "minimallyEncoded": "Z", "string": "Z"},
{"fullyEncoded": "%5B", "minimallyEncoded": "%5B", "string": "["},
{"fullyEncoded": "%5C", "minimallyEncoded": "%5C", "string": "\\"},
{"fullyEncoded": "%5D", "minimallyEncoded": "%5D", "string": "]"},
{"fullyEncoded": "%5E", "minimallyEncoded": "%5E", "string": "^"},
{"fullyEncoded": "%5F", "minimallyEncoded": "_", "string": "_"},
{"fullyEncoded": "%60", "minimallyEncoded": "%60", "string": "` + "`" + `"},
{"fullyEncoded": "%61", "minimallyEncoded": "a", "string": "a"},
{"fullyEncoded": "%62", "minimallyEncoded": "b", "string": "b"},
{"fullyEncoded": "%63", "minimallyEncoded": "c", "string": "c"},
{"fullyEncoded": "%64", "minimallyEncoded": "d", "string": "d"},
{"fullyEncoded": "%65", "minimallyEncoded": "e", "string": "e"},
{"fullyEncoded": "%66", "minimallyEncoded": "f", "string": "f"},
{"fullyEncoded": "%67", "minimallyEncoded": "g", "string": "g"},
{"fullyEncoded": "%68", "minimallyEncoded": "h", "string": "h"},
{"fullyEncoded": "%69", "minimallyEncoded": "i", "string": "i"},
{"fullyEncoded": "%6A", "minimallyEncoded": "j", "string": "j"},
{"fullyEncoded": "%6B", "minimallyEncoded": "k", "string": "k"},
{"fullyEncoded": "%6C", "minimallyEncoded": "l", "string": "l"},
{"fullyEncoded": "%6D", "minimallyEncoded": "m", "string": "m"},
{"fullyEncoded": "%6E", "minimallyEncoded": "n", "string": "n"},
{"fullyEncoded": "%6F", "minimallyEncoded": "o", "string": "o"},
{"fullyEncoded": "%70", "minimallyEncoded": "p", "string": "p"},
{"fullyEncoded": "%71", "minimallyEncoded": "q", "string": "q"},
{"fullyEncoded": "%72", "minimallyEncoded": "r", "string": "r"},
{"fullyEncoded": "%73", "minimallyEncoded": "s", "string": "s"},
{"fullyEncoded": "%74", "minimallyEncoded": "t", "string": "t"},
{"fullyEncoded": "%75", "minimallyEncoded": "u", "string": "u"},
{"fullyEncoded": "%76", "minimallyEncoded": "v", "string": "v"},
{"fullyEncoded": "%77", "minimallyEncoded": "w", "string": "w"},
{"fullyEncoded": "%78", "minimallyEncoded": "x", "string": "x"},
{"fullyEncoded": "%79", "minimallyEncoded": "y", "string": "y"},
{"fullyEncoded": "%7A", "minimallyEncoded": "z", "string": "z"},
{"fullyEncoded": "%7B", "minimallyEncoded": "%7B", "string": "{"},
{"fullyEncoded": "%7C", "minimallyEncoded": "%7C", "string": "|"},
{"fullyEncoded": "%7D", "minimallyEncoded": "%7D", "string": "}"},
{"fullyEncoded": "%7E", "minimallyEncoded": "~", "string": "~"},
{"fullyEncoded": "%7F", "minimallyEncoded": "%7F", "string": "\u007f"},
{"fullyEncoded": "%E8%87%AA%E7%94%B1", "minimallyEncoded": "%E8%87%AA%E7%94%B1", "string": "\u81ea\u7531"},
{"fullyEncoded": "%F0%90%90%80", "minimallyEncoded": "%F0%90%90%80", "string": "\ud801\udc00"}
]`

type testCase struct {
	Full string `json:"fullyEncoded"`
	Min  string `json:"minimallyEncoded"`
	Raw  string `json:"string"`
}

func TestEscapes(t *testing.T) {
	dec := json.NewDecoder(strings.NewReader(testCases))
	var tcs []testCase
	if err := dec.Decode(&tcs); err != nil {
		t.Fatal(err)
	}
	for _, tc := range tcs {
		en := escape(tc.Raw)
		if !(en == tc.Full || en == tc.Min) {
			t.Errorf("encode %q: got %q, want %q or %q", tc.Raw, en, tc.Min, tc.Full)
		}

		m, err := unescape(tc.Min)
		if err != nil {
			t.Errorf("decode %q: %v", tc.Min, err)
		}
		if m != tc.Raw {
			t.Errorf("decode %q: got %q, want %q", tc.Min, m, tc.Raw)
		}
		f, err := unescape(tc.Full)
		if err != nil {
			t.Errorf("decode %q: %v", tc.Full, err)
		}
		if f != tc.Raw {
			t.Errorf("decode %q: got %q, want %q", tc.Full, f, tc.Raw)
		}
	}
}
81
vendor/github.com/kurin/blazer/base/strings.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package base

import (
	"bytes"
	"errors"
	"fmt"
)

func noEscape(c byte) bool {
	switch c {
	case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@':
		return true
	}
	return false
}

func escape(s string) string {
	// cribbed from url.go, kinda
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '/':
			b.WriteByte(c)
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9':
			b.WriteByte(c)
		case noEscape(c):
			b.WriteByte(c)
		default:
			fmt.Fprintf(b, "%%%X", c)
		}
	}
	return b.String()
}

func unescape(s string) (string, error) {
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch c {
		case '/':
			b.WriteString("/")
		case '+':
			b.WriteString(" ")
		case '%':
			if len(s)-i < 3 {
				return "", errors.New("unescape: bad encoding")
			}
			b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 2
		default:
			b.WriteByte(c)
		}
	}
	return b.String(), nil
}

func unhex(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return 0
}
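escape leaves '/', ASCII alphanumerics, and the noEscape set alone and percent-encodes every other byte, while unescape additionally maps '+' back to a space, so unescape(escape(s)) == s for any input. TestEscapes in the integration test above exercises the Backblaze vectors; a quick in-package round-trip check could look like this (a hypothetical test, not part of the vendored code):

// Hypothetical round-trip property check for escape/unescape.
func TestEscapeRoundTrip(t *testing.T) {
	for _, s := range []string{"hello world", "a/b/c", "100%", "自由"} {
		got, err := unescape(escape(s))
		if err != nil {
			t.Fatalf("unescape(escape(%q)): %v", s, err)
		}
		if got != s {
			t.Errorf("round trip of %q: got %q", s, got)
		}
	}
}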
134
vendor/github.com/kurin/blazer/examples/simple/simple.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This is a simple program that will copy named files into or out of B2.
//
// To copy a file into B2:
//
//     B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple /path/to/file b2://bucket/path/to/dst
//
// To copy a file out:
//
//     B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple b2://bucket/path/to/file /path/to/dst
package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"net/url"
	"os"
	"strings"

	"github.com/kurin/blazer/b2"
)

func main() {
	flag.Parse()
	b2id := os.Getenv("B2_ACCOUNT_ID")
	b2key := os.Getenv("B2_ACCOUNT_KEY")

	args := flag.Args()
	if len(args) != 2 {
		fmt.Printf("Usage:\n\nsimple [src] [dst]\n")
		return
	}
	src, dst := args[0], args[1]

	ctx := context.Background()
	c, err := b2.NewClient(ctx, b2id, b2key)
	if err != nil {
		fmt.Println(err)
		return
	}

	var r io.ReadCloser
	var w io.WriteCloser

	if strings.HasPrefix(src, "b2://") {
		reader, err := b2Reader(ctx, c, src)
		if err != nil {
			fmt.Println(err)
			return
		}
		r = reader
	} else {
		f, err := os.Open(src)
		if err != nil {
			fmt.Println(err)
			return
		}
		r = f
	}
	// Readers do not need their errors checked on close. (Also it's a little
	// silly to defer this in main(), but.)
	defer r.Close()

	if strings.HasPrefix(dst, "b2://") {
		writer, err := b2Writer(ctx, c, dst)
		if err != nil {
			fmt.Println(err)
			return
		}
		w = writer
	} else {
		f, err := os.Create(dst)
		if err != nil {
			fmt.Println(err)
			return
		}
		w = f
	}

	// Copy and check error.
	if _, err := io.Copy(w, r); err != nil {
		fmt.Println(err)
		return
	}

	// It is very important to check the error of the writer.
	if err := w.Close(); err != nil {
		fmt.Println(err)
	}
}

func b2Reader(ctx context.Context, c *b2.Client, path string) (io.ReadCloser, error) {
	o, err := b2Obj(ctx, c, path)
	if err != nil {
		return nil, err
	}
	return o.NewReader(ctx), nil
}

func b2Writer(ctx context.Context, c *b2.Client, path string) (io.WriteCloser, error) {
	o, err := b2Obj(ctx, c, path)
	if err != nil {
		return nil, err
	}
	return o.NewWriter(ctx), nil
}

func b2Obj(ctx context.Context, c *b2.Client, path string) (*b2.Object, error) {
	uri, err := url.Parse(path)
	if err != nil {
		return nil, err
	}
	bucket, err := c.Bucket(ctx, uri.Host)
	if err != nil {
		return nil, err
	}
	// B2 paths must not begin with /, so trim it here.
	return bucket.Object(strings.TrimPrefix(uri.Path, "/")), nil
}
229
vendor/github.com/kurin/blazer/internal/b2types/b2types.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package b2types implements internal types common to the B2 API.
package b2types

// You know what would be amazing? If I could autogen this from like a JSON
// file. Wouldn't that be amazing? That would be amazing.

const (
	V1api = "/b2api/v1/"
)

type ErrorMessage struct {
	Status int    `json:"status"`
	Code   string `json:"code"`
	Msg    string `json:"message"`
}

type AuthorizeAccountResponse struct {
	AccountID   string `json:"accountId"`
	AuthToken   string `json:"authorizationToken"`
	URI         string `json:"apiUrl"`
	DownloadURI string `json:"downloadUrl"`
	MinPartSize int    `json:"minimumPartSize"`
}

type LifecycleRule struct {
	DaysHiddenUntilDeleted int    `json:"daysFromHidingToDeleting,omitempty"`
	DaysNewUntilHidden     int    `json:"daysFromUploadingToHiding,omitempty"`
	Prefix                 string `json:"fileNamePrefix"`
}

type CreateBucketRequest struct {
	AccountID      string            `json:"accountId"`
	Name           string            `json:"bucketName"`
	Type           string            `json:"bucketType"`
	Info           map[string]string `json:"bucketInfo"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules"`
}

type CreateBucketResponse struct {
	BucketID       string            `json:"bucketId"`
	Name           string            `json:"bucketName"`
	Type           string            `json:"bucketType"`
	Info           map[string]string `json:"bucketInfo"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules"`
	Revision       int               `json:"revision"`
}

type DeleteBucketRequest struct {
	AccountID string `json:"accountId"`
	BucketID  string `json:"bucketId"`
}

type ListBucketsRequest struct {
	AccountID string `json:"accountId"`
}

type ListBucketsResponse struct {
	Buckets []CreateBucketResponse `json:"buckets"`
}

type UpdateBucketRequest struct {
	AccountID string `json:"accountId"`
	BucketID  string `json:"bucketId"`
	// bucketName is a required field according to
	// https://www.backblaze.com/b2/docs/b2_update_bucket.html.
	//
	// However, actually setting it returns 400: unknown field in
	// com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName
	//
	//Name string `json:"bucketName"`
	Type           string            `json:"bucketType,omitempty"`
	Info           map[string]string `json:"bucketInfo,omitempty"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules,omitempty"`
	IfRevisionIs   int               `json:"ifRevisionIs,omitempty"`
}

type UpdateBucketResponse CreateBucketResponse

type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"`
}

type GetUploadURLResponse struct {
	URI   string `json:"uploadUrl"`
	Token string `json:"authorizationToken"`
}

type UploadFileResponse struct {
	FileID    string `json:"fileId"`
	Timestamp int64  `json:"uploadTimestamp"`
	Action    string `json:"action"`
}

type DeleteFileVersionRequest struct {
	Name   string `json:"fileName"`
	FileID string `json:"fileId"`
}

type StartLargeFileRequest struct {
	BucketID    string            `json:"bucketId"`
	Name        string            `json:"fileName"`
	ContentType string            `json:"contentType"`
	Info        map[string]string `json:"fileInfo,omitempty"`
}

type StartLargeFileResponse struct {
	ID string `json:"fileId"`
}

type CancelLargeFileRequest struct {
	ID string `json:"fileId"`
}

type ListPartsRequest struct {
	ID    string `json:"fileId"`
	Start int    `json:"startPartNumber"`
	Count int    `json:"maxPartCount"`
}

type ListPartsResponse struct {
	Next  int `json:"nextPartNumber"`
	Parts []struct {
		ID     string `json:"fileId"`
		Number int    `json:"partNumber"`
		SHA1   string `json:"contentSha1"`
		Size   int64  `json:"contentLength"`
	} `json:"parts"`
}

type getUploadPartURLRequest struct {
	ID string `json:"fileId"`
}

type getUploadPartURLResponse struct {
	URL   string `json:"uploadUrl"`
	Token string `json:"authorizationToken"`
}

type FinishLargeFileRequest struct {
	ID     string   `json:"fileId"`
	Hashes []string `json:"partSha1Array"`
}

type FinishLargeFileResponse struct {
	Name      string `json:"fileName"`
	FileID    string `json:"fileId"`
	Timestamp int64  `json:"uploadTimestamp"`
	Action    string `json:"action"`
}

type ListFileNamesRequest struct {
	BucketID     string `json:"bucketId"`
	Count        int    `json:"maxFileCount"`
	Continuation string `json:"startFileName,omitempty"`
	Prefix       string `json:"prefix,omitempty"`
	Delimiter    string `json:"delimiter,omitempty"`
}

type ListFileNamesResponse struct {
	Continuation string                `json:"nextFileName"`
	Files        []GetFileInfoResponse `json:"files"`
}

type ListFileVersionsRequest struct {
	BucketID  string `json:"bucketId"`
	Count     int    `json:"maxFileCount"`
	StartName string `json:"startFileName,omitempty"`
	StartID   string `json:"startFileId,omitempty"`
	Prefix    string `json:"prefix,omitempty"`
	Delimiter string `json:"delimiter,omitempty"`
}

type ListFileVersionsResponse struct {
	NextName string                `json:"nextFileName"`
	NextID   string                `json:"nextFileId"`
	Files    []GetFileInfoResponse `json:"files"`
}

type HideFileRequest struct {
	BucketID string `json:"bucketId"`
	File     string `json:"fileName"`
}

type HideFileResponse struct {
	ID        string `json:"fileId"`
	Timestamp int64  `json:"uploadTimestamp"`
	Action    string `json:"action"`
}

type GetFileInfoRequest struct {
	ID string `json:"fileId"`
}

type GetFileInfoResponse struct {
	FileID      string            `json:"fileId"`
	Name        string            `json:"fileName"`
	SHA1        string            `json:"contentSha1"`
	Size        int64             `json:"contentLength"`
	ContentType string            `json:"contentType"`
	Info        map[string]string `json:"fileInfo"`
	Action      string            `json:"action"`
	Timestamp   int64             `json:"uploadTimestamp"`
}

type GetDownloadAuthorizationRequest struct {
	BucketID string `json:"bucketId"`
	Prefix   string `json:"fileNamePrefix"`
	Valid    int    `json:"validDurationInSeconds"`
}

type GetDownloadAuthorizationResponse struct {
	BucketID string `json:"bucketId"`
	Prefix   string `json:"fileNamePrefix"`
	Token    string `json:"authorizationToken"`
}
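These structs are a hand-written mirror of the JSON bodies exchanged with the B2 v1 API. As an illustration, marshaling a CreateBucketRequest produces the b2_create_bucket body (a sketch: the IDs below are made up, and the expected-output comment is our own, not from the vendored code; assumes encoding/json and fmt are imported):

req := b2types.CreateBucketRequest{
	AccountID: "abc123", // hypothetical account ID
	Name:      "abc123-base-tests",
	Type:      "allPrivate",
	Info:      map[string]string{"key": "val"},
}
body, _ := json.Marshal(req)
fmt.Println(string(body))
// {"accountId":"abc123","bucketName":"abc123-base-tests","bucketType":"allPrivate","bucketInfo":{"key":"val"},"lifecycleRules":null}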
54
vendor/github.com/kurin/blazer/internal/blog/blog.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package blog implements a private logger, in the manner of glog, without
// polluting the flag namespace or leaving files all over /tmp.
//
// It has almost no features, and a bunch of global state.
package blog

import (
	"log"
	"os"
	"strconv"
)

var level int32

type Verbose bool

func init() {
	lvl := os.Getenv("B2_LOG_LEVEL")
	i, err := strconv.ParseInt(lvl, 10, 32)
	if err != nil {
		return
	}
	level = int32(i)
}

func (v Verbose) Info(a ...interface{}) {
	if v {
		log.Print(a...)
	}
}

func (v Verbose) Infof(format string, a ...interface{}) {
	if v {
		log.Printf(format, a...)
	}
}

func V(target int32) Verbose {
	return Verbose(target <= level)
}
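The level is parsed once from B2_LOG_LEVEL at init, so verbosity is fixed for the life of the process; V(n) returns a true Verbose only when n <= level, which makes guarded calls cheap no-ops otherwise. This is exactly the pattern reader.go and writer.go above rely on:

// With B2_LOG_LEVEL=2 in the environment, both lines print;
// with it unset or lower, the calls do nothing.
blog.V(1).Infof("b2 writer: %v; retrying", err)
blog.V(2).Infof("thread %d handling chunk %d", id, chunkID)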
21
vendor/github.com/minio/go-homedir/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 Mitchell Hashimoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
16
vendor/github.com/minio/go-homedir/README.md
generated
vendored
Normal file
@ -0,0 +1,16 @@
# go-homedir

This is a Go library for detecting the user's home directory without
the use of cgo, so the library can be used in cross-compilation environments.

Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
for a user, and `homedir.Expand()` to expand the `~` in a path to the home
directory.

**Why not just use `os/user`?** The built-in `os/user` package is not
available on certain architectures such as i386 or PNaCl. Additionally
it has a cgo dependency on Darwin systems. This means that any Go code
that uses that package cannot cross compile. But 99% of the time the
use for `os/user` is just to retrieve the home directory, which we can
do for the current user without cgo. This library does that, enabling
cross-compilation.
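A minimal sketch of the two calls the README describes; the printed path is illustrative, and "~/.config/restic" is a hypothetical path chosen only for the example:

package main

import (
	"fmt"
	"log"

	homedir "github.com/minio/go-homedir"
)

func main() {
	// Dir returns the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	// Expand replaces a leading ~ with that directory.
	expanded, err := homedir.Expand("~/.config/restic")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(home, expanded)
}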
64
vendor/github.com/minio/go-homedir/dir_posix.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|||
// +build !windows

// Copyright 2016 (C) Mitchell Hashimoto
// Distributed under the MIT License.

package homedir

import (
    "bytes"
    "errors"
    "os"
    "os/exec"
    "os/user"
    "strconv"
    "strings"
)

// dir returns the homedir of the current user for all POSIX compatible
// operating systems.
func dir() (string, error) {
    // First prefer the HOME environment variable.
    if home := os.Getenv("HOME"); home != "" {
        return home, nil
    }

    // user.Current is not implemented for i386 and PNaCL like environments.
    if currUser, err := user.Current(); err == nil {
        return currUser.HomeDir, nil
    }

    // If that fails, try getent.
    var stdout bytes.Buffer
    cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
    cmd.Stdout = &stdout
    if err := cmd.Run(); err != nil {
        // If "getent" is missing, ignore it.
        if err != exec.ErrNotFound {
            return "", err
        }
    } else {
        if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
            // username:password:uid:gid:gecos:home:shell
            passwdParts := strings.SplitN(passwd, ":", 7)
            if len(passwdParts) > 5 {
                return passwdParts[5], nil
            }
        }
    }

    // If all else fails, try the shell.
    stdout.Reset()
    cmd = exec.Command("sh", "-c", "cd && pwd")
    cmd.Stdout = &stdout
    if err := cmd.Run(); err != nil {
        return "", err
    }

    result := strings.TrimSpace(stdout.String())
    if result == "" {
        return "", errors.New("blank output when reading home directory")
    }

    return result, nil
}
28
vendor/github.com/minio/go-homedir/dir_windows.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
// Copyright 2016 (C) Mitchell Hashimoto
// Distributed under the MIT License.

package homedir

import (
    "errors"
    "os"
)

// dir returns the homedir of the current user for MS Windows OS.
func dir() (string, error) {
    // First prefer the HOME environment variable.
    if home := os.Getenv("HOME"); home != "" {
        return home, nil
    }
    drive := os.Getenv("HOMEDRIVE")
    path := os.Getenv("HOMEPATH")
    home := drive + path
    if drive == "" || path == "" {
        home = os.Getenv("USERPROFILE")
    }
    if home == "" {
        return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
    }

    return home, nil
}
68
vendor/github.com/minio/go-homedir/homedir.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
// Copyright 2016 (C) Mitchell Hashimoto
// Distributed under the MIT License.

// Package homedir implements a portable function to determine the current user's homedir.
package homedir

import (
    "errors"
    "path/filepath"
    "sync"
)

// DisableCache will disable caching of the home directory. Caching is enabled
// by default.
var DisableCache bool

var homedirCache string
var cacheLock sync.Mutex

// Dir returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Dir() (string, error) {
    cacheLock.Lock()
    defer cacheLock.Unlock()

    // Return cached homedir if available.
    if !DisableCache {
        if homedirCache != "" {
            return homedirCache, nil
        }
    }

    // Determine the OS-specific current homedir.
    result, err := dir()
    if err != nil {
        return "", err
    }

    // Cache for future lookups.
    homedirCache = result
    return result, nil
}

// Expand expands the path to include the home directory if the path
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
// returned as-is.
func Expand(path string) (string, error) {
    if len(path) == 0 {
        return path, nil
    }

    if path[0] != '~' {
        return path, nil
    }

    if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
        return "", errors.New("cannot expand user-specific home dir")
    }

    dir, err := Dir()
    if err != nil {
        return "", err
    }

    return filepath.Join(dir, path[1:]), nil
}
114
vendor/github.com/minio/go-homedir/homedir_test.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
package homedir

import (
    "os"
    "os/user"
    "path/filepath"
    "testing"
)

func patchEnv(key, value string) func() {
    bck := os.Getenv(key)
    deferFunc := func() {
        os.Setenv(key, bck)
    }

    os.Setenv(key, value)
    return deferFunc
}

func BenchmarkDir(b *testing.B) {
    // We do this for any "warmups".
    for i := 0; i < 10; i++ {
        Dir()
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        Dir()
    }
}

func TestDir(t *testing.T) {
    // NOTE: This test is not portable. If user.Current() worked
    // everywhere, we wouldn't need our package in the first place.
    u, err := user.Current()
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    dir, err := Dir()
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    if u.HomeDir != dir {
        t.Fatalf("%#v != %#v", u.HomeDir, dir)
    }
}

func TestExpand(t *testing.T) {
    u, err := user.Current()
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    cases := []struct {
        Input  string
        Output string
        Err    bool
    }{
        {
            "/foo",
            "/foo",
            false,
        },

        {
            "~/foo",
            filepath.Join(u.HomeDir, "foo"),
            false,
        },

        {
            "",
            "",
            false,
        },

        {
            "~",
            u.HomeDir,
            false,
        },

        {
            "~foo/foo",
            "",
            true,
        },
    }

    for _, tc := range cases {
        actual, err := Expand(tc.Input)
        if (err != nil) != tc.Err {
            t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err)
        }

        if actual != tc.Output {
            t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual)
        }
    }

    DisableCache = true
    defer func() { DisableCache = false }()
    defer patchEnv("HOME", "/custom/path/")()
    expected := filepath.Join("/", "custom", "path", "foo/bar")
    actual, err := Expand("~/foo/bar")

    if err != nil {
        t.Errorf("No error is expected, got: %v", err)
    } else if actual != expected {
        t.Errorf("Expected: %v; actual: %v", expected, actual)
    }
}
2
vendor/github.com/minio/minio-go/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
*~
*.test
20
vendor/github.com/minio/minio-go/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,20 @@
sudo: false
language: go

os:
- linux

env:
- ARCH=x86_64
- ARCH=i686

go:
- 1.5.3
- 1.6
- 1.7.4
- 1.8

script:
- diff -au <(gofmt -d .) <(printf "")
- go vet ./...
- go test -short -race -v ./...
23
vendor/github.com/minio/minio-go/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,23 @@

### Developer Guidelines

``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:

* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
    - Fork it
    - Create your feature branch (git checkout -b my-new-feature)
    - Commit your changes (git commit -am 'Add some feature')
    - Push to the branch (git push origin my-new-feature)
    - Create a new Pull Request

* When you're ready to create a pull request, be sure to:
    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
    - Run `go fmt`
    - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
    - Make sure `go test -race ./...` and `go build` complete.
      NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as the environment variables
      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``

* Read the [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
    - The `minio-go` project is strictly conformant with Golang style
    - If you happen to observe offending code, please feel free to send a pull request
202
vendor/github.com/minio/minio-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
19
vendor/github.com/minio/minio-go/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
# For maintainers only

## Responsibilities

Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)

### Making new releases

Edit the `libraryVersion` constant in `api.go`.

```
$ grep libraryVersion api.go
    libraryVersion = "0.3.0"
```

```
$ git tag 0.3.0
$ git push --tags
```
250
vendor/github.com/minio/minio-go/README.md
generated
vendored
Normal file
@ -0,0 +1,250 @@
# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [](https://slack.minio.io) [](https://sourcegraph.com/github.com/minio/minio-go?badge)

The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.

**Supported cloud storage providers:**

- AWS Signature Version 4
   - Amazon S3
   - Minio

- AWS Signature Version 2
   - Google Cloud Storage (Compatibility Mode)
   - Openstack Swift + Swift3 middleware
   - Ceph Object Gateway
   - Riak CS

This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and walk through a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).

This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).

## Download from Github
```sh
go get -u github.com/minio/minio-go
```

## Initialize Minio Client
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.

| Parameter | Description |
| :--- | :--- |
| endpoint | URL to object storage service. |
| accessKeyID | Access key is the user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
| secure | Set this value to 'true' to enable secure (HTTPS) access. |

```go
package main

import (
    "log"

    "github.com/minio/minio-go"
)

func main() {
    endpoint := "play.minio.io:9000"
    accessKeyID := "Q3AM3UQ867SPQQA43P2F"
    secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
    useSSL := true

    // Initialize minio client object.
    minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
    if err != nil {
        log.Fatalln(err)
    }

    log.Printf("%v\n", minioClient) // minioClient is now set up
}
```

## Quick Start Example - File Uploader
This example program connects to an object storage server, creates a bucket, and uploads a file to the bucket.

We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.

### FileUploader.go
```go
package main

import (
    "log"

    "github.com/minio/minio-go"
)

func main() {
    endpoint := "play.minio.io:9000"
    accessKeyID := "Q3AM3UQ867SPQQA43P2F"
    secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
    useSSL := true

    // Initialize minio client object.
    minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
    if err != nil {
        log.Fatalln(err)
    }

    // Make a new bucket called mymusic.
    bucketName := "mymusic"
    location := "us-east-1"

    err = minioClient.MakeBucket(bucketName, location)
    if err != nil {
        // Check to see if we already own this bucket (which happens if you run this twice).
        exists, err := minioClient.BucketExists(bucketName)
        if err == nil && exists {
            log.Printf("We already own %s\n", bucketName)
        } else {
            log.Fatalln(err)
        }
    } else {
        log.Printf("Successfully created %s\n", bucketName)
    }

    // Upload the zip file.
    objectName := "golden-oldies.zip"
    filePath := "/tmp/golden-oldies.zip"
    contentType := "application/zip"

    // Upload the zip file with FPutObject.
    n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
    if err != nil {
        log.Fatalln(err)
    }

    log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```

### Run FileUploader
```sh
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413

mc ls play/mymusic/
[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
```

## API Reference
The full API Reference is available here:

* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)

### API Reference : Bucket Operations

* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)

### API Reference : Bucket policy Operations

* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)

### API Reference : Bucket notification Operations

* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)

### API Reference : File Object Operations

* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)

### API Reference : Object Operations

* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)

### API Reference: Encrypted Object Operations

* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)

### API Reference : Presigned Operations

* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)

### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)


## Full Examples

#### Full Examples : Bucket Operations

* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)

#### Full Examples : Bucket policy Operations

* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)

#### Full Examples : Bucket notification Operations

* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)

#### Full Examples : File Object Operations

* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)

#### Full Examples : Object Operations

* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)

#### Full Examples : Encrypted Object Operations

* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)

#### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)

## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)

## Contribute

[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)

[](https://travis-ci.org/minio/minio-go)
[](https://ci.appveyor.com/project/harshavardhana/minio-go)
532
vendor/github.com/minio/minio-go/api-compose-object.go
generated
vendored
Normal file
@ -0,0 +1,532 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "encoding/base64"
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

    "github.com/minio/minio-go/pkg/s3utils"
)

// SSEInfo - represents Server-Side-Encryption parameters specified by
// a user.
type SSEInfo struct {
    key  []byte
    algo string
}

// NewSSEInfo - specifies (binary or un-encoded) encryption key and
// algorithm name. If algo is empty, it defaults to "AES256". Ref:
// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
func NewSSEInfo(key []byte, algo string) SSEInfo {
    if algo == "" {
        algo = "AES256"
    }
    return SSEInfo{key, algo}
}

// internal method that computes SSE-C headers
func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
    if s == nil {
        return nil
    }

    cs := ""
    if isCopySource {
        cs = "copy-source-"
    }
    return map[string]string{
        "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
        "x-amz-" + cs + "server-side-encryption-customer-key":       base64.StdEncoding.EncodeToString(s.key),
        "x-amz-" + cs + "server-side-encryption-customer-key-MD5":   base64.StdEncoding.EncodeToString(sumMD5(s.key)),
    }
}

// GetSSEHeaders - computes and returns headers for SSE-C as key-value
// pairs. They can be set as metadata in PutObject* requests (for
// encryption) or be set as request headers in `Core.GetObject` (for
// decryption).
func (s *SSEInfo) GetSSEHeaders() map[string]string {
    return s.getSSEHeaders(false)
}

// DestinationInfo - type with information about the object to be
// created via server-side copy requests, using the Compose API.
type DestinationInfo struct {
    bucket, object string

    // key for encrypting destination
    encryption *SSEInfo

    // if no user-metadata is provided, it is copied from source
    // (when there is only one source object in the compose
    // request)
    userMetadata map[string]string
}

// NewDestinationInfo - creates a compose-object/copy-source
// destination info object.
//
// `encryptSSEC` is the key info for server-side-encryption with
// customer provided key. If it is nil, no encryption is performed.
//
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
    userMeta map[string]string) (d DestinationInfo, err error) {

    // Input validation.
    if err = s3utils.CheckValidBucketName(bucket); err != nil {
        return d, err
    }
    if err = s3utils.CheckValidObjectName(object); err != nil {
        return d, err
    }

    // Process custom-metadata to remove a `x-amz-meta-` prefix if
    // present and validate that keys are distinct (after this
    // prefix removal).
    m := make(map[string]string)
    for k, v := range userMeta {
        if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
            k = k[len("x-amz-meta-"):]
        }
        if _, ok := m[k]; ok {
            return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
        }
        m[k] = v
    }

    return DestinationInfo{
        bucket:       bucket,
        object:       object,
        encryption:   encryptSSEC,
        userMetadata: m,
    }, nil
}

// getUserMetaHeadersMap - construct appropriate key-value pairs to send
// as headers from metadata map to pass into copy-object request. For
// single part copy-object (i.e. non-multipart object), enable the
// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
// `REPLACE`, so that metadata headers from the source are not copied
// over.
func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
    if len(d.userMetadata) == 0 {
        return nil
    }
    r := make(map[string]string)
    if withCopyDirectiveHeader {
        r["x-amz-metadata-directive"] = "REPLACE"
    }
    for k, v := range d.userMetadata {
        r["x-amz-meta-"+k] = v
    }
    return r
}

// SourceInfo - represents a source object to be copied, using
// server-side copying APIs.
type SourceInfo struct {
    bucket, object string

    start, end int64

    decryptKey *SSEInfo
    // Headers to send with the upload-part-copy request involving
    // this source object.
    Headers http.Header
}

// NewSourceInfo - create a compose-object/copy-object source info
// object.
//
// `decryptSSEC` is the decryption key using server-side-encryption
// with customer provided key. It may be nil if the source is not
// encrypted.
func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
    r := SourceInfo{
        bucket:     bucket,
        object:     object,
        start:      -1, // range is unspecified by default
        decryptKey: decryptSSEC,
        Headers:    make(http.Header),
    }

    // Set the source header.
    r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))

    // Assemble decryption headers for the upload-part-copy request.
    for k, v := range decryptSSEC.getSSEHeaders(true) {
        r.Headers.Set(k, v)
    }

    return r
}

// SetRange - Set the start and end offset of the source object to be
// copied. If this method is not called, the whole source object is
// copied.
func (s *SourceInfo) SetRange(start, end int64) error {
    if start > end || start < 0 {
        return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
    }
    // Note that 0 <= start <= end
    s.start, s.end = start, end
    return nil
}

// SetMatchETagCond - Set ETag match condition. The object is copied
// only if the etag of the source matches the value given here.
func (s *SourceInfo) SetMatchETagCond(etag string) error {
    if etag == "" {
        return ErrInvalidArgument("ETag cannot be empty.")
    }
    s.Headers.Set("x-amz-copy-source-if-match", etag)
    return nil
}

// SetMatchETagExceptCond - Set the ETag match exception
// condition. The object is copied only if the etag of the source is
// not the value given here.
func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
    if etag == "" {
        return ErrInvalidArgument("ETag cannot be empty.")
    }
    s.Headers.Set("x-amz-copy-source-if-none-match", etag)
    return nil
}

// SetModifiedSinceCond - Set the modified since condition.
func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
    if modTime.IsZero() {
        return ErrInvalidArgument("Input time cannot be 0.")
    }
    s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
    return nil
}

// SetUnmodifiedSinceCond - Set the unmodified since condition.
func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
    if modTime.IsZero() {
        return ErrInvalidArgument("Input time cannot be 0.")
    }
    s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
    return nil
}

// Helper to fetch size and etag of an object using a StatObject call.
func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
    // Get object info - need size and etag here. Also, decryption
    // headers are added to the stat request if given.
    var objInfo ObjectInfo
    rh := NewGetReqHeaders()
    for k, v := range s.decryptKey.getSSEHeaders(false) {
        rh.Set(k, v)
    }
    objInfo, err = c.statObject(s.bucket, s.object, rh)
    if err != nil {
        err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
    } else {
        size = objInfo.Size
        etag = objInfo.ETag
        userMeta = make(map[string]string)
        for k, v := range objInfo.Metadata {
            if strings.HasPrefix(k, "x-amz-meta-") {
                if len(v) > 0 {
                    userMeta[k] = v[0]
                }
            }
        }
    }
    return
}

// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
    headers http.Header) (p CompletePart, err error) {

    // Build query parameters.
    urlValues := make(url.Values)
    urlValues.Set("partNumber", strconv.Itoa(partNumber))
    urlValues.Set("uploadId", uploadID)

    // Send upload-part-copy request.
    resp, err := c.executeMethod("PUT", requestMetadata{
        bucketName:   bucket,
        objectName:   object,
        customHeader: headers,
        queryValues:  urlValues,
    })
    defer closeResponse(resp)
    if err != nil {
        return p, err
    }

    // Check if we got an error response.
    if resp.StatusCode != http.StatusOK {
        return p, httpRespToErrorResponse(resp, bucket, object)
    }

    // Decode copy-part response on success.
    cpObjRes := copyObjectResult{}
    err = xmlDecoder(resp.Body, &cpObjRes)
    if err != nil {
        return p, err
    }
    p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
    return p, nil
}

// ComposeObject - creates an object using server-side copying of
// existing objects. It takes a list of source objects (with optional
// offsets) and concatenates them into a new object using only
// server-side copying operations.
func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
    if len(srcs) < 1 || len(srcs) > maxPartsCount {
        return ErrInvalidArgument("There must be at least one and up to 10000 source objects.")
    }

    srcSizes := make([]int64, len(srcs))
    var totalSize, size, totalParts int64
    var srcUserMeta map[string]string
    var etag string
    var err error
    for i, src := range srcs {
        size, etag, srcUserMeta, err = src.getProps(c)
        if err != nil {
            return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
        }

        // Error out if client side encryption is used in this source object when
        // more than one source objects are given.
        if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
            return ErrInvalidArgument(
                fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
        }

        // Since we did a HEAD to get size, we use the ETag
        // value to make sure the object has not changed by
        // the time we perform the copy. This is done only if
        // the user has not set their own ETag match
        // condition.
        if src.Headers.Get("x-amz-copy-source-if-match") == "" {
            src.SetMatchETagCond(etag)
        }

        // Check if a segment is specified, and if so, is the
        // segment within object bounds?
        if src.start != -1 {
            // Since range is specified,
            //    0 <= src.start <= src.end
            // so the only invalid case to check is:
            if src.end >= size {
                return ErrInvalidArgument(
                    fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
                        i, src.start, src.end, size))
            }
            size = src.end - src.start + 1
        }

        // Only the last source may be less than `absMinPartSize`.
        if size < absMinPartSize && i < len(srcs)-1 {
            return ErrInvalidArgument(
                fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
        }

        // Is data to copy too large?
        totalSize += size
        if totalSize > maxMultipartPutObjectSize {
            return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
        }

        // Record source size.
        srcSizes[i] = size

        // Calculate parts needed for the current source.
        totalParts += partsRequired(size)
        // Do we need more parts than we are allowed?
        if totalParts > maxPartsCount {
            return ErrInvalidArgument(fmt.Sprintf(
                "Your proposed compose object requires more than %d parts", maxPartsCount))
        }
    }

    // Single source object case (i.e. when only one source is
    // involved, it is being copied wholly and at most 5GiB in
    // size).
    if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
        h := srcs[0].Headers
        // Add destination encryption headers.
        for k, v := range dst.encryption.getSSEHeaders(false) {
            h.Set(k, v)
        }

        // If no user metadata is specified (and so, the
        // for-loop below is not entered), metadata from the
        // source is copied to the destination (due to
        // single-part copy-object PUT request behaviour).
        for k, v := range dst.getUserMetaHeadersMap(true) {
            h.Set(k, v)
        }

        // Send copy request.
        resp, err := c.executeMethod("PUT", requestMetadata{
            bucketName:   dst.bucket,
            objectName:   dst.object,
            customHeader: h,
        })
        defer closeResponse(resp)
        if err != nil {
            return err
        }
        // Check if we got an error response.
        if resp.StatusCode != http.StatusOK {
            return httpRespToErrorResponse(resp, dst.bucket, dst.object)
        }

        // Return nil on success.
        return nil
    }

    // Now, handle multipart-copy cases.

    // 1. Initiate a new multipart upload.

    // Set user-metadata on the destination object. If no
    // user-metadata is specified, and there is only one source,
    // (only) then metadata from source is copied.
    userMeta := dst.getUserMetaHeadersMap(false)
    metaMap := userMeta
    if len(userMeta) == 0 && len(srcs) == 1 {
        metaMap = srcUserMeta
    }
    metaHeaders := make(map[string][]string)
    for k, v := range metaMap {
        metaHeaders[k] = append(metaHeaders[k], v)
    }
    uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
    if err != nil {
        return fmt.Errorf("Error creating new upload: %v", err)
    }

    // 2. Perform copy part uploads.
    objParts := []CompletePart{}
    partIndex := 1
    for i, src := range srcs {
        h := src.Headers
        // Add destination encryption headers.
        for k, v := range dst.encryption.getSSEHeaders(false) {
            h.Set(k, v)
        }

        // Calculate start/end indices of parts after
        // splitting.
        startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
        for j, start := range startIdx {
            end := endIdx[j]

            // Add (or reset) source range header for
            // upload part copy request.
            h.Set("x-amz-copy-source-range",
                fmt.Sprintf("bytes=%d-%d", start, end))

            // Make upload-part-copy request.
            complPart, err := c.uploadPartCopy(dst.bucket,
                dst.object, uploadID, partIndex, h)
            if err != nil {
                return fmt.Errorf("Error in upload-part-copy - %v", err)
            }
            objParts = append(objParts, complPart)
            partIndex++
        }
    }

    // 3. Make final complete-multipart request.
    _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
        completeMultipartUpload{Parts: objParts})
    if err != nil {
        err = fmt.Errorf("Error in complete-multipart request - %v", err)
    }
    return err
}

// partsRequired is ceiling(size / copyPartSize)
func partsRequired(size int64) int64 {
    r := size / copyPartSize
    if size%copyPartSize > 0 {
        r++
    }
    return r
}

// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
    if size == 0 {
        return
    }

    reqParts := partsRequired(size)
    startIndex = make([]int64, reqParts)
    endIndex = make([]int64, reqParts)
    // Compute number of required parts `k`, as:
    //
    //     k = ceiling(size / copyPartSize)
    //
    // Now, distribute the `size` bytes in the source into
    // k parts as evenly as possible:
    //
    //     r parts sized (q+1) bytes, and
    //     (k - r) parts sized q bytes, where
    //
    //     size = q * k + r (by simple division of size by k,
    //     so that 0 <= r < k)
    //
    start := src.start
    if start == -1 {
        start = 0
    }
    quot, rem := size/reqParts, size%reqParts
    nextStart := start
    for j := int64(0); j < reqParts; j++ {
        curPartSize := quot
        if j < rem {
            curPartSize++
        }

        cStart := nextStart
        cEnd := cStart + curPartSize - 1
        nextStart = cEnd + 1

        startIndex[j], endIndex[j] = cStart, cEnd
    }
    return
}
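To make the even-split arithmetic above concrete, here is a small standalone sketch (a hypothetical `main` package; `copyPartSize` is assumed to be 5 GiB here, which is what the `gb5`-based expectations in the test file below imply):

```go
package main

import "fmt"

// copyPartSize is assumed to be 5 GiB for this sketch.
const copyPartSize = int64(5 * 1024 * 1024 * 1024)

// partsRequired mirrors the ceiling division used above.
func partsRequired(size int64) int64 {
    r := size / copyPartSize
    if size%copyPartSize > 0 {
        r++
    }
    return r
}

func main() {
    // 10 GiB + 1 byte: needs k = 3 parts.
    size := 2*copyPartSize + 1
    k := partsRequired(size)
    q, r := size/k, size%k
    // r parts get q+1 bytes and k-r parts get q bytes, so the
    // parts differ in size by at most one byte and none falls
    // below the minimum part size (except possibly the last).
    fmt.Printf("k=%d: %d parts of %d bytes, %d parts of %d bytes\n",
        k, r, q+1, k-r, q)
}
```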
88
vendor/github.com/minio/minio-go/api-compose-object_test.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package minio

import (
    "reflect"
    "testing"
)

const (
    gb1    = 1024 * 1024 * 1024
    gb5    = 5 * gb1
    gb5p1  = gb5 + 1
    gb10p1 = 2*gb5 + 1
    gb10p2 = 2*gb5 + 2
)

func TestPartsRequired(t *testing.T) {
    testCases := []struct {
        size, ref int64
    }{
        {0, 0},
        {1, 1},
        {gb5, 1},
        {2 * gb5, 2},
        {gb10p1, 3},
        {gb10p2, 3},
    }

    for i, testCase := range testCases {
        res := partsRequired(testCase.size)
        if res != testCase.ref {
            t.Errorf("Test %d - output did not match with reference results", i+1)
        }
    }
}

func TestCalculateEvenSplits(t *testing.T) {

    testCases := []struct {
        // input size and source object
        size int64
        src  SourceInfo

        // output part-indexes
        starts, ends []int64
    }{
        {0, SourceInfo{start: -1}, nil, nil},
        {1, SourceInfo{start: -1}, []int64{0}, []int64{0}},
        {1, SourceInfo{start: 0}, []int64{0}, []int64{0}},

        {gb1, SourceInfo{start: -1}, []int64{0}, []int64{gb1 - 1}},
        {gb5, SourceInfo{start: -1}, []int64{0}, []int64{gb5 - 1}},

        // 2 part splits
        {gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},
        {gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},

        // 3 part splits
        {gb10p1, SourceInfo{start: -1},
            []int64{0, gb10p1/3 + 1, 2*gb10p1/3 + 1},
            []int64{gb10p1 / 3, 2 * gb10p1 / 3, gb10p1 - 1}},

        {gb10p2, SourceInfo{start: -1},
            []int64{0, gb10p2 / 3, 2 * gb10p2 / 3},
            []int64{gb10p2/3 - 1, 2*gb10p2/3 - 1, gb10p2 - 1}},
    }

    for i, testCase := range testCases {
        resStart, resEnd := calculateEvenSplits(testCase.size, testCase.src)
        if !reflect.DeepEqual(testCase.starts, resStart) || !reflect.DeepEqual(testCase.ends, resEnd) {
            t.Errorf("Test %d - output did not match with reference results", i+1)
        }
    }
}
83
vendor/github.com/minio/minio-go/api-datatypes.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BucketInfo container for bucket metadata.
|
||||
type BucketInfo struct {
|
||||
// The name of the bucket.
|
||||
Name string `json:"name"`
|
||||
// Date the bucket was created.
|
||||
CreationDate time.Time `json:"creationDate"`
|
||||
}
|
||||
|
||||
// ObjectInfo container for object metadata.
|
||||
type ObjectInfo struct {
|
||||
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
|
||||
// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
|
||||
// each parts concatenated into one string.
|
||||
ETag string `json:"etag"`
|
||||
|
||||
Key string `json:"name"` // Name of the object
|
||||
LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
|
||||
Size int64 `json:"size"` // Size in bytes of the object.
|
||||
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
|
||||
|
||||
// Collection of additional metadata on the object.
|
||||
// eg: x-amz-meta-*, content-encoding etc.
|
||||
Metadata http.Header `json:"metadata"`
|
||||
|
||||
// Owner name.
|
||||
Owner struct {
|
||||
DisplayName string `json:"name"`
|
||||
ID string `json:"id"`
|
||||
} `json:"owner"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
StorageClass string `json:"storageClass"`
|
||||
|
||||
// Error
|
||||
Err error `json:"-"`
|
||||
}
|
||||
|
||||
// ObjectMultipartInfo container for multipart object metadata.
|
||||
type ObjectMultipartInfo struct {
|
||||
// Date and time at which the multipart upload was initiated.
|
||||
Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
|
||||
Initiator initiator
|
||||
Owner owner
|
||||
|
||||
// The type of storage to use for the object. Defaults to 'STANDARD'.
|
||||
StorageClass string
|
||||
|
||||
// Key of the object for which the multipart upload was initiated.
|
||||
Key string
|
||||
|
||||
// Size in bytes of the object.
|
||||
Size int64
|
||||
|
||||
// Upload ID that identifies the multipart upload.
|
||||
UploadID string `xml:"UploadId"`
|
||||
|
||||
// Error
|
||||
Err error
|
||||
}
|
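As a quick illustration of the json tags on ObjectInfo, the sketch below marshals a trimmed-down copy of the struct; the field values are invented for the example:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// objectInfo is a trimmed-down copy of ObjectInfo above,
// kept only to show the json tags in effect.
type objectInfo struct {
	ETag         string    `json:"etag"`
	Key          string    `json:"name"`
	LastModified time.Time `json:"lastModified"`
	Size         int64     `json:"size"`
}

func main() {
	oi := objectInfo{
		ETag:         "d41d8cd98f00b204e9800998ecf8427e",
		Key:          "photos/cat.png",
		LastModified: time.Date(2017, 5, 1, 0, 0, 0, 0, time.UTC),
		Size:         1024,
	}
	b, err := json.Marshal(oi)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"etag":"...","name":"photos/cat.png","lastModified":"2017-05-01T00:00:00Z","size":1024}
}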
267
vendor/github.com/minio/minio-go/api-error-response.go
generated
vendored
Normal file
|
@ -0,0 +1,267 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
/* **** SAMPLE ERROR RESPONSE ****
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Error>
|
||||
<Code>AccessDenied</Code>
|
||||
<Message>Access Denied</Message>
|
||||
<BucketName>bucketName</BucketName>
|
||||
<Key>objectName</Key>
|
||||
<RequestId>F19772218238A85A</RequestId>
|
||||
<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
|
||||
</Error>
|
||||
*/
|
||||
|
||||
// ErrorResponse - Is the typed error returned by all API operations.
|
||||
type ErrorResponse struct {
|
||||
XMLName xml.Name `xml:"Error" json:"-"`
|
||||
Code string
|
||||
Message string
|
||||
BucketName string
|
||||
Key string
|
||||
RequestID string `xml:"RequestId"`
|
||||
HostID string `xml:"HostId"`
|
||||
|
||||
// Region where the bucket is located. This header is returned
|
||||
// only in HEAD bucket and ListObjects response.
|
||||
Region string
|
||||
|
||||
// Headers of the returned S3 XML error
|
||||
Headers http.Header `xml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
|
||||
// http headers.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// import s3 "github.com/minio/minio-go"
|
||||
// ...
|
||||
// ...
|
||||
// reader, stat, err := s3.GetObject(...)
|
||||
// if err != nil {
|
||||
// resp := s3.ToErrorResponse(err)
|
||||
// }
|
||||
// ...
|
||||
func ToErrorResponse(err error) ErrorResponse {
|
||||
switch err := err.(type) {
|
||||
case ErrorResponse:
|
||||
return err
|
||||
default:
|
||||
return ErrorResponse{}
|
||||
}
|
||||
}
|
||||
|
||||
// Error - Returns S3 error string.
|
||||
func (e ErrorResponse) Error() string {
|
||||
if e.Message == "" {
|
||||
msg, ok := s3ErrorResponseMap[e.Code]
|
||||
if !ok {
|
||||
msg = fmt.Sprintf("Error response code %s.", e.Code)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// Common string for errors to report issue location in unexpected
|
||||
// cases.
|
||||
const (
|
||||
reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
|
||||
)
|
||||
|
||||
// httpRespToErrorResponse returns a new encoded ErrorResponse
|
||||
// structure as error.
|
||||
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
|
||||
if resp == nil {
|
||||
msg := "Response is empty. " + reportIssue
|
||||
return ErrInvalidArgument(msg)
|
||||
}
|
||||
var errResp ErrorResponse
|
||||
|
||||
err := xmlDecoder(resp.Body, &errResp)
|
||||
// XML decoding failed with no body; fall back to the HTTP status code.
|
||||
if err != nil {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
if objectName == "" {
|
||||
errResp = ErrorResponse{
|
||||
Code: "NoSuchBucket",
|
||||
Message: "The specified bucket does not exist.",
|
||||
BucketName: bucketName,
|
||||
}
|
||||
} else {
|
||||
errResp = ErrorResponse{
|
||||
Code: "NoSuchKey",
|
||||
Message: "The specified key does not exist.",
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
case http.StatusForbidden:
|
||||
errResp = ErrorResponse{
|
||||
Code: "AccessDenied",
|
||||
Message: "Access Denied.",
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
case http.StatusConflict:
|
||||
errResp = ErrorResponse{
|
||||
Code: "Conflict",
|
||||
Message: "Bucket not empty.",
|
||||
BucketName: bucketName,
|
||||
}
|
||||
case http.StatusPreconditionFailed:
|
||||
errResp = ErrorResponse{
|
||||
Code: "PreconditionFailed",
|
||||
Message: s3ErrorResponseMap["PreconditionFailed"],
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
default:
|
||||
errResp = ErrorResponse{
|
||||
Code: resp.Status,
|
||||
Message: resp.Status,
|
||||
BucketName: bucketName,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save hostID, requestID and region information
|
||||
// from headers if not available through error XML.
|
||||
if errResp.RequestID == "" {
|
||||
errResp.RequestID = resp.Header.Get("x-amz-request-id")
|
||||
}
|
||||
if errResp.HostID == "" {
|
||||
errResp.HostID = resp.Header.Get("x-amz-id-2")
|
||||
}
|
||||
if errResp.Region == "" {
|
||||
errResp.Region = resp.Header.Get("x-amz-bucket-region")
|
||||
}
|
||||
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
|
||||
errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
|
||||
}
|
||||
|
||||
// Save headers returned in the API XML error
|
||||
errResp.Headers = resp.Header
|
||||
|
||||
return errResp
|
||||
}
|
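The header fallback above can be exercised without a server. A hedged sketch, written as a test inside package minio since httpRespToErrorResponse and xmlDecoder are unexported:

package minio

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"testing"
)

// Sketch (not part of the vendored file): an empty-body 404 with no
// object name is synthesized into a "NoSuchBucket" error response.
func TestEmptyBody404Fallback(t *testing.T) {
	resp := &http.Response{
		StatusCode: http.StatusNotFound,
		Body:       ioutil.NopCloser(bytes.NewReader(nil)),
		Header:     make(http.Header),
	}
	errResp := ToErrorResponse(httpRespToErrorResponse(resp, "mybucket", ""))
	if errResp.Code != "NoSuchBucket" {
		t.Fatalf("expected NoSuchBucket, got %q", errResp.Code)
	}
}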
||||
|
||||
// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
|
||||
func ErrTransferAccelerationBucket(bucketName string) error {
|
||||
msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
|
||||
return ErrorResponse{
|
||||
Code: "InvalidArgument",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrEntityTooLarge - Input size is larger than supported maximum.
|
||||
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
|
||||
return ErrorResponse{
|
||||
Code: "EntityTooLarge",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrEntityTooSmall - Input size is smaller than supported minimum.
|
||||
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
|
||||
return ErrorResponse{
|
||||
Code: "EntityTooSmall",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrUnexpectedEOF - Unexpected end of file reached.
|
||||
func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
|
||||
strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
|
||||
return ErrorResponse{
|
||||
Code: "UnexpectedEOF",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrInvalidBucketName - Invalid bucket name response.
|
||||
func ErrInvalidBucketName(message string) error {
|
||||
return ErrorResponse{
|
||||
Code: "InvalidBucketName",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// ErrInvalidObjectName - Invalid object name response.
|
||||
func ErrInvalidObjectName(message string) error {
|
||||
return ErrorResponse{
|
||||
Code: "NoSuchKey",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// ErrInvalidObjectPrefix - Invalid object prefix response is
|
||||
// similar to object name response.
|
||||
var ErrInvalidObjectPrefix = ErrInvalidObjectName
|
||||
|
||||
// ErrInvalidArgument - Invalid argument response.
|
||||
func ErrInvalidArgument(message string) error {
|
||||
return ErrorResponse{
|
||||
Code: "InvalidArgument",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// ErrNoSuchBucketPolicy - No Such Bucket Policy response
|
||||
// The specified bucket does not have a bucket policy.
|
||||
func ErrNoSuchBucketPolicy(message string) error {
|
||||
return ErrorResponse{
|
||||
Code: "NoSuchBucketPolicy",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// ErrAPINotSupported - API not supported response
|
||||
// The specified API call is not supported
|
||||
func ErrAPINotSupported(message string) error {
|
||||
return ErrorResponse{
|
||||
Code: "APINotSupported",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
282
vendor/github.com/minio/minio-go/api-error-response_test.go
generated
vendored
Normal file
|
@ -0,0 +1,282 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests validate the Error generator function for http response with error.
|
||||
func TestHttpRespToErrorResponse(t *testing.T) {
|
||||
// 'genAPIErrorResponse' generates ErrorResponse for given APIError.
|
||||
// provides an encodable populated response value.
|
||||
genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
|
||||
var errResp = ErrorResponse{}
|
||||
errResp.Code = err.Code
|
||||
errResp.Message = err.Description
|
||||
errResp.BucketName = bucketName
|
||||
return errResp
|
||||
}
|
||||
|
||||
// Encodes the response headers into XML format.
|
||||
encodeErr := func(response interface{}) []byte {
|
||||
var bytesBuffer bytes.Buffer
|
||||
bytesBuffer.WriteString(xml.Header)
|
||||
encode := xml.NewEncoder(&bytesBuffer)
|
||||
encode.Encode(response)
|
||||
return bytesBuffer.Bytes()
|
||||
}
|
||||
|
||||
// `createAPIErrorResponse` Mocks XML error response from the server.
|
||||
createAPIErrorResponse := func(APIErr APIError, bucketName string) *http.Response {
|
||||
// generate error response.
|
||||
// response body contains the XML error message.
|
||||
resp := &http.Response{}
|
||||
errorResponse := genAPIErrorResponse(APIErr, bucketName)
|
||||
encodedErrorResponse := encodeErr(errorResponse)
|
||||
// write Header.
|
||||
resp.StatusCode = APIErr.HTTPStatusCode
|
||||
resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// 'genErrResponse' constructs an error response based on the http status code.
|
||||
genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
|
||||
errResp := ErrorResponse{
|
||||
Code: code,
|
||||
Message: message,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
RequestID: resp.Header.Get("x-amz-request-id"),
|
||||
HostID: resp.Header.Get("x-amz-id-2"),
|
||||
Region: resp.Header.Get("x-amz-bucket-region"),
|
||||
Headers: resp.Header,
|
||||
}
|
||||
return errResp
|
||||
}
|
||||
|
||||
// Generate invalid argument error.
|
||||
genInvalidError := func(message string) error {
|
||||
errResp := ErrorResponse{
|
||||
Code: "InvalidArgument",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
return errResp
|
||||
}
|
||||
|
||||
// Set common http response headers.
|
||||
setCommonHeaders := func(resp *http.Response) *http.Response {
|
||||
// set headers.
|
||||
resp.Header = make(http.Header)
|
||||
resp.Header.Set("x-amz-request-id", "xyz")
|
||||
resp.Header.Set("x-amz-id-2", "abc")
|
||||
resp.Header.Set("x-amz-bucket-region", "us-east-1")
|
||||
return resp
|
||||
}
|
||||
|
||||
// Generate http response with empty body.
|
||||
// Set the StatusCode to the argument supplied.
|
||||
// Sets common headers.
|
||||
genEmptyBodyResponse := func(statusCode int) *http.Response {
|
||||
resp := &http.Response{}
|
||||
// set empty response body.
|
||||
resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
|
||||
// set headers.
|
||||
setCommonHeaders(resp)
|
||||
// set status code.
|
||||
resp.StatusCode = statusCode
|
||||
return resp
|
||||
}
|
||||
|
||||
// Decode XML error message from the http response body.
|
||||
decodeXMLError := func(resp *http.Response, t *testing.T) error {
|
||||
var errResp ErrorResponse
|
||||
err := xmlDecoder(resp.Body, &errResp)
|
||||
if err != nil {
|
||||
t.Fatal("XML decoding of response body failed")
|
||||
}
|
||||
return errResp
|
||||
}
|
||||
|
||||
// List of APIErrors used to generate/mock server side XML error response.
|
||||
APIErrors := []APIError{
|
||||
{
|
||||
Code: "NoSuchBucketPolicy",
|
||||
Description: "The specified bucket does not have a bucket policy.",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
|
||||
// List of expected responses.
|
||||
// Used for asserting the actual response.
|
||||
expectedErrResponse := []error{
|
||||
genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
|
||||
decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
|
||||
genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
|
||||
genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
|
||||
genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
|
||||
genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
|
||||
genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
|
||||
}
|
||||
|
||||
// List of http response to be used as input.
|
||||
inputResponses := []*http.Response{
|
||||
nil,
|
||||
createAPIErrorResponse(APIErrors[0], "minio-bucket"),
|
||||
genEmptyBodyResponse(http.StatusNotFound),
|
||||
genEmptyBodyResponse(http.StatusNotFound),
|
||||
genEmptyBodyResponse(http.StatusForbidden),
|
||||
genEmptyBodyResponse(http.StatusConflict),
|
||||
genEmptyBodyResponse(http.StatusBadRequest),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
objectName string
|
||||
inputHTTPResp *http.Response
|
||||
// expected results.
|
||||
expectedResult error
|
||||
|
||||
|
||||
}{
|
||||
{"minio-bucket", "", inputResponses[0], expectedErrResponse[0]},
|
||||
{"minio-bucket", "", inputResponses[1], expectedErrResponse[1]},
|
||||
{"minio-bucket", "", inputResponses[2], expectedErrResponse[2]},
|
||||
{"minio-bucket", "Asia/", inputResponses[3], expectedErrResponse[3]},
|
||||
{"minio-bucket", "", inputResponses[4], expectedErrResponse[4]},
|
||||
{"minio-bucket", "", inputResponses[5], expectedErrResponse[5]},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
actualResult := httpRespToErrorResponse(testCase.inputHTTPResp, testCase.bucketName, testCase.objectName)
|
||||
if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
|
||||
t.Errorf("Test %d: Expected result to be '%#v', but instead got '%#v'", i+1, testCase.expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrEntityTooLarge' error response.
|
||||
func TestErrEntityTooLarge(t *testing.T) {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999)
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "EntityTooLarge",
|
||||
Message: msg,
|
||||
BucketName: "minio-bucket",
|
||||
Key: "Asia/",
|
||||
}
|
||||
actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrEntityTooSmall' error response.
|
||||
func TestErrEntityTooSmall(t *testing.T) {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1)
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "EntityTooSmall",
|
||||
Message: msg,
|
||||
BucketName: "minio-bucket",
|
||||
Key: "Asia/",
|
||||
}
|
||||
actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrUnexpectedEOF' error response.
|
||||
func TestErrUnexpectedEOF(t *testing.T) {
|
||||
msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
|
||||
strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "UnexpectedEOF",
|
||||
Message: msg,
|
||||
BucketName: "minio-bucket",
|
||||
Key: "Asia/",
|
||||
}
|
||||
actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrInvalidBucketName' error response.
|
||||
func TestErrInvalidBucketName(t *testing.T) {
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "InvalidBucketName",
|
||||
Message: "Invalid Bucket name",
|
||||
RequestID: "minio",
|
||||
}
|
||||
actualResult := ErrInvalidBucketName("Invalid Bucket name")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrInvalidObjectName' error response.
|
||||
func TestErrInvalidObjectName(t *testing.T) {
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "NoSuchKey",
|
||||
Message: "Invalid Object Key",
|
||||
RequestID: "minio",
|
||||
}
|
||||
actualResult := ErrInvalidObjectName("Invalid Object Key")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Test validates 'ErrInvalidArgument' response.
|
||||
func TestErrInvalidArgument(t *testing.T) {
|
||||
expectedResult := ErrorResponse{
|
||||
Code: "InvalidArgument",
|
||||
Message: "Invalid Argument",
|
||||
RequestID: "minio",
|
||||
}
|
||||
actualResult := ErrInvalidArgument("Invalid Argument")
|
||||
if !reflect.DeepEqual(expectedResult, actualResult) {
|
||||
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests if the Message field is missing.
|
||||
func TestErrWithoutMessage(t *testing.T) {
|
||||
errResp := ErrorResponse{
|
||||
Code: "AccessDenied",
|
||||
RequestID: "minio",
|
||||
}
|
||||
if errResp.Error() != "Access Denied." {
|
||||
t.Errorf("Expected \"Access Denied.\", got %s", errResp)
|
||||
}
|
||||
errResp = ErrorResponse{
|
||||
Code: "InvalidArgument",
|
||||
RequestID: "minio",
|
||||
}
|
||||
if errResp.Error() != "Error response code InvalidArgument." {
|
||||
t.Errorf("Expected \"Error response code InvalidArgument.\", got %s", errResp)
|
||||
}
|
||||
}
|
113
vendor/github.com/minio/minio-go/api-get-object-file.go
generated
vendored
Normal file
|
@ -0,0 +1,113 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/minio/minio-go/pkg/s3utils"
|
||||
)
|
||||
|
||||
// FGetObject - download contents of an object to a local file.
|
||||
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify if destination already exists.
|
||||
st, err := os.Stat(filePath)
|
||||
if err == nil {
|
||||
// If the destination exists and is a directory.
|
||||
if st.IsDir() {
|
||||
return ErrInvalidArgument("fileName is a directory.")
|
||||
}
|
||||
}
|
||||
|
||||
// Proceed if file does not exist. return for all other errors.
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extract top level directory.
|
||||
objectDir, _ := filepath.Split(filePath)
|
||||
if objectDir != "" {
|
||||
// Create any missing top level directories.
|
||||
if err := os.MkdirAll(objectDir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Gather md5sum.
|
||||
objectStat, err := c.StatObject(bucketName, objectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write to a temporary file "<fileName><ETag>.part.minio" before saving.
|
||||
filePartPath := filePath + objectStat.ETag + ".part.minio"
|
||||
|
||||
// If exists, open in append mode. If not create it as a part file.
|
||||
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Issue Stat to get the current offset.
|
||||
st, err = filePart.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize get object request headers to set the
|
||||
// appropriate range offsets to read from.
|
||||
reqHeaders := NewGetReqHeaders()
|
||||
if st.Size() > 0 {
|
||||
reqHeaders.SetRange(st.Size(), 0)
|
||||
}
|
||||
|
||||
// Seek to current position for incoming reader.
|
||||
objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write to the part file.
|
||||
if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the file before rename, this is specifically needed for Windows users.
|
||||
if err = filePart.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Safely completed. Now commit by renaming to actual filename.
|
||||
if err = os.Rename(filePartPath, filePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Return.
|
||||
return nil
|
||||
}
|
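A hedged usage sketch for FGetObject; the endpoint, credentials, and bucket/object names below are placeholders, and minio.New is the client constructor exported by this library:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Downloads to /tmp/cat.jpg, resuming a previous partial download
	// if a matching "<path><ETag>.part.minio" file is present.
	if err := client.FGetObject("mybucket", "photos/cat.jpg", "/tmp/cat.jpg"); err != nil {
		log.Fatal(err)
	}
}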
691
vendor/github.com/minio/minio-go/api-get-object.go
generated
vendored
Normal file
|
@ -0,0 +1,691 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/pkg/encrypt"
|
||||
"github.com/minio/minio-go/pkg/s3utils"
|
||||
)
|
||||
|
||||
// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials,
|
||||
// returned stream should be closed by the caller.
|
||||
func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
|
||||
if encryptMaterials == nil {
|
||||
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
|
||||
}
|
||||
|
||||
// Fetch encrypted object
|
||||
encReader, err := c.GetObject(bucketName, objectName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Stat object to get its encryption metadata
|
||||
st, err := encReader.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set up object for decryption; object is transparently
|
||||
// decrypted as the consumer starts reading.
|
||||
encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
|
||||
|
||||
// Success.
|
||||
return encryptMaterials, nil
|
||||
}
|
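A hedged sketch of the decryption flow. It assumes the NewSymmetricKey and NewCBCSecureMaterials helpers from the vendored pkg/encrypt package; all names and credentials are placeholders:

package main

import (
	"io/ioutil"
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Symmetric key material (32 bytes for AES-256); the helpers are
	// assumed from the vendored pkg/encrypt package.
	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven1"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatal(err)
	}

	r, err := client.GetEncryptedObject("mybucket", "secret.txt", materials)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	plaintext, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decrypted %d bytes", len(plaintext))
}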
||||
|
||||
// GetObject - returns a seekable, readable object.
|
||||
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var httpReader io.ReadCloser
|
||||
var objectInfo ObjectInfo
|
||||
var err error
|
||||
|
||||
// Create request channel.
|
||||
reqCh := make(chan getRequest)
|
||||
// Create response channel.
|
||||
resCh := make(chan getResponse)
|
||||
// Create done channel.
|
||||
doneCh := make(chan struct{})
|
||||
|
||||
// This routine feeds partial object data as and when the caller reads.
|
||||
go func() {
|
||||
defer close(reqCh)
|
||||
defer close(resCh)
|
||||
|
||||
// Used to verify if etag of object has changed since last read.
|
||||
var etag string
|
||||
|
||||
// Loop through the incoming control messages and read data.
|
||||
for {
|
||||
select {
|
||||
// When the done channel is closed exit our routine.
|
||||
case <-doneCh:
|
||||
// Close the http response body before returning.
|
||||
// This ends the connection with the server.
|
||||
if httpReader != nil {
|
||||
httpReader.Close()
|
||||
}
|
||||
return
|
||||
|
||||
// Gather incoming request.
|
||||
case req := <-reqCh:
|
||||
// If this is the first request we may not need to do a getObject request yet.
|
||||
if req.isFirstReq {
|
||||
// First request is a Read/ReadAt.
|
||||
if req.isReadOp {
|
||||
reqHeaders := NewGetReqHeaders()
|
||||
// Differentiate between wanting the whole object and just a range.
|
||||
if req.isReadAt {
|
||||
// If this is a ReadAt request only get the specified range.
|
||||
// Range is set with respect to the offset and length of the buffer requested.
|
||||
// Do not set objectInfo from the first readAt request because it will not get
|
||||
// the whole object.
|
||||
reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
|
||||
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
|
||||
} else {
|
||||
if req.Offset > 0 {
|
||||
reqHeaders.SetRange(req.Offset, 0)
|
||||
}
|
||||
|
||||
// First request is a Read request.
|
||||
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
|
||||
}
|
||||
if err != nil {
|
||||
resCh <- getResponse{
|
||||
Error: err,
|
||||
}
|
||||
return
|
||||
}
|
||||
etag = objectInfo.ETag
|
||||
// Read at least firstReq.Buffer bytes, if not we have
|
||||
// reached our EOF.
|
||||
size, err := io.ReadFull(httpReader, req.Buffer)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// If an EOF happens after reading some but not
|
||||
// all the bytes ReadFull returns ErrUnexpectedEOF
|
||||
err = io.EOF
|
||||
}
|
||||
// Send back the first response.
|
||||
resCh <- getResponse{
|
||||
objectInfo: objectInfo,
|
||||
Size: int(size),
|
||||
Error: err,
|
||||
didRead: true,
|
||||
}
|
||||
} else {
|
||||
// First request is a Stat or Seek call.
|
||||
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
|
||||
objectInfo, err = c.StatObject(bucketName, objectName)
|
||||
if err != nil {
|
||||
resCh <- getResponse{
|
||||
Error: err,
|
||||
}
|
||||
// Exit the go-routine.
|
||||
return
|
||||
}
|
||||
etag = objectInfo.ETag
|
||||
// Send back the first response.
|
||||
resCh <- getResponse{
|
||||
objectInfo: objectInfo,
|
||||
}
|
||||
}
|
||||
} else if req.settingObjectInfo { // Request is just to get objectInfo.
|
||||
reqHeaders := NewGetReqHeaders()
|
||||
if etag != "" {
|
||||
reqHeaders.SetMatchETag(etag)
|
||||
}
|
||||
objectInfo, err := c.statObject(bucketName, objectName, reqHeaders)
|
||||
if err != nil {
|
||||
resCh <- getResponse{
|
||||
Error: err,
|
||||
}
|
||||
// Exit the goroutine.
|
||||
return
|
||||
}
|
||||
// Send back the objectInfo.
|
||||
resCh <- getResponse{
|
||||
objectInfo: objectInfo,
|
||||
}
|
||||
} else {
|
||||
// Offset changes fetch the new object at an Offset.
|
||||
// Because the httpReader may not be set by the first
|
||||
// request if it was a stat or seek it must be checked
|
||||
// if the object has been read or not to only initialize
|
||||
// new ones when they haven't been already.
|
||||
// All readAt requests are new requests.
|
||||
if req.DidOffsetChange || !req.beenRead {
|
||||
reqHeaders := NewGetReqHeaders()
|
||||
if etag != "" {
|
||||
reqHeaders.SetMatchETag(etag)
|
||||
}
|
||||
if httpReader != nil {
|
||||
// Close previously opened http reader.
|
||||
httpReader.Close()
|
||||
}
|
||||
// If this request is a readAt only get the specified range.
|
||||
if req.isReadAt {
|
||||
// Range is set with respect to the offset and length of the buffer requested.
|
||||
reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
|
||||
httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders)
|
||||
} else {
|
||||
// Range is set with respect to the offset.
|
||||
if req.Offset > 0 {
|
||||
reqHeaders.SetRange(req.Offset, 0)
|
||||
}
|
||||
|
||||
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
|
||||
}
|
||||
if err != nil {
|
||||
resCh <- getResponse{
|
||||
Error: err,
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Read at least req.Buffer bytes, if not we have
|
||||
// reached our EOF.
|
||||
size, err := io.ReadFull(httpReader, req.Buffer)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// If an EOF happens after reading some but not
|
||||
// all the bytes ReadFull returns ErrUnexpectedEOF
|
||||
err = io.EOF
|
||||
}
|
||||
// Reply back how much was read.
|
||||
resCh <- getResponse{
|
||||
Size: int(size),
|
||||
Error: err,
|
||||
didRead: true,
|
||||
objectInfo: objectInfo,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a newObject wired to the request/response channels above.
|
||||
return newObject(reqCh, resCh, doneCh), nil
|
||||
}
|
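GetObject is lazy: the goroutine above only issues a network request once the caller Reads, Stats, or Seeks. A hedged usage sketch with placeholder endpoint and names:

package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	obj, err := client.GetObject("mybucket", "archive.tar.gz")
	if err != nil {
		log.Fatal(err) // only argument validation can fail here
	}
	defer obj.Close()

	f, err := os.Create("/tmp/archive.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The first Read issued by io.Copy triggers the actual GET.
	if _, err := io.Copy(f, obj); err != nil {
		log.Fatal(err)
	}
}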
||||
|
||||
// get request message container to communicate with internal
|
||||
// go-routine.
|
||||
type getRequest struct {
|
||||
Buffer []byte
|
||||
Offset int64 // readAt offset.
|
||||
DidOffsetChange bool // Tracks the offset changes for Seek requests.
|
||||
beenRead bool // Determines if this is the first time an object is being read.
|
||||
isReadAt bool // Determines if this request is a request to a specific range
|
||||
isReadOp bool // Determines if this request is a Read or Read/At request.
|
||||
isFirstReq bool // Determines if this request is the first time an object is being accessed.
|
||||
settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
|
||||
}
|
||||
|
||||
// get response message container to reply back for the request.
|
||||
type getResponse struct {
|
||||
Size int
|
||||
Error error
|
||||
didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
|
||||
objectInfo ObjectInfo // Used for the first request.
|
||||
}
|
||||
|
||||
// Object represents an open object. It implements
|
||||
// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
|
||||
type Object struct {
|
||||
// Mutex.
|
||||
mutex *sync.Mutex
|
||||
|
||||
// User allocated and defined.
|
||||
reqCh chan<- getRequest
|
||||
resCh <-chan getResponse
|
||||
doneCh chan<- struct{}
|
||||
currOffset int64
|
||||
objectInfo ObjectInfo
|
||||
|
||||
// Ask lower level to initiate data fetching based on currOffset
|
||||
seekData bool
|
||||
|
||||
// Keeps track of closed call.
|
||||
isClosed bool
|
||||
|
||||
// Keeps track of if this is the first call.
|
||||
isStarted bool
|
||||
|
||||
// Previous error saved for future calls.
|
||||
prevErr error
|
||||
|
||||
// Keeps track of if this object has been read yet.
|
||||
beenRead bool
|
||||
|
||||
// Keeps track of if objectInfo has been set yet.
|
||||
objectInfoSet bool
|
||||
}
|
||||
|
||||
// doGetRequest - sends and blocks on the reqCh of an object.
|
||||
// Returns back the size of the buffer read, if anything was read, as well
|
||||
// as any error encountered. For all first requests sent on the object
|
||||
// it is also responsible for sending back the objectInfo.
|
||||
func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
|
||||
o.reqCh <- request
|
||||
response := <-o.resCh
|
||||
|
||||
// Return any error to the top level.
|
||||
if response.Error != nil {
|
||||
return response, response.Error
|
||||
}
|
||||
|
||||
// This was the first request.
|
||||
if !o.isStarted {
|
||||
// The object has been operated on.
|
||||
o.isStarted = true
|
||||
}
|
||||
// Set the objectInfo if the request was not readAt
|
||||
// and it hasn't been set before.
|
||||
if !o.objectInfoSet && !request.isReadAt {
|
||||
o.objectInfo = response.objectInfo
|
||||
o.objectInfoSet = true
|
||||
}
|
||||
// Set beenRead only if it has not been set before.
|
||||
if !o.beenRead {
|
||||
o.beenRead = response.didRead
|
||||
}
|
||||
// Data is ready on the wire; no need to re-initiate the connection at a lower level.
|
||||
o.seekData = false
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// setOffset - handles the setting of offsets for
|
||||
// Read/ReadAt/Seek requests.
|
||||
func (o *Object) setOffset(bytesRead int64) error {
|
||||
// Update the currentOffset.
|
||||
o.currOffset += bytesRead
|
||||
|
||||
if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads up to len(b) bytes into b. It returns the number of
|
||||
// bytes read (0 <= n <= len(b)) and any error encountered. Returns
|
||||
// io.EOF upon end of file.
|
||||
func (o *Object) Read(b []byte) (n int, err error) {
|
||||
if o == nil {
|
||||
return 0, ErrInvalidArgument("Object is nil")
|
||||
}
|
||||
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
// prevErr is previous error saved from previous operation.
|
||||
if o.prevErr != nil || o.isClosed {
|
||||
return 0, o.prevErr
|
||||
}
|
||||
// Create a new request.
|
||||
readReq := getRequest{
|
||||
isReadOp: true,
|
||||
beenRead: o.beenRead,
|
||||
Buffer: b,
|
||||
}
|
||||
|
||||
// Alert that this is the first request.
|
||||
if !o.isStarted {
|
||||
readReq.isFirstReq = true
|
||||
}
|
||||
|
||||
// Ask to establish a new data fetch routine based on seekData flag
|
||||
readReq.DidOffsetChange = o.seekData
|
||||
readReq.Offset = o.currOffset
|
||||
|
||||
// Send and receive from the first request.
|
||||
response, err := o.doGetRequest(readReq)
|
||||
if err != nil && err != io.EOF {
|
||||
// Save the error for future calls.
|
||||
o.prevErr = err
|
||||
return response.Size, err
|
||||
}
|
||||
|
||||
// Bytes read.
|
||||
bytesRead := int64(response.Size)
|
||||
|
||||
// Set the new offset.
|
||||
oerr := o.setOffset(bytesRead)
|
||||
if oerr != nil {
|
||||
// Save the error for future calls.
|
||||
o.prevErr = oerr
|
||||
return response.Size, oerr
|
||||
}
|
||||
|
||||
// Return the response.
|
||||
return response.Size, err
|
||||
}
|
||||
|
||||
// Stat returns the ObjectInfo structure describing Object.
|
||||
func (o *Object) Stat() (ObjectInfo, error) {
|
||||
if o == nil {
|
||||
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
|
||||
}
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
|
||||
return ObjectInfo{}, o.prevErr
|
||||
}
|
||||
|
||||
// This is the first request.
|
||||
if !o.isStarted || !o.objectInfoSet {
|
||||
statReq := getRequest{
|
||||
isFirstReq: !o.isStarted,
|
||||
settingObjectInfo: !o.objectInfoSet,
|
||||
}
|
||||
|
||||
// Send the request and get the response.
|
||||
_, err := o.doGetRequest(statReq)
|
||||
if err != nil {
|
||||
o.prevErr = err
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return o.objectInfo, nil
|
||||
}
|
||||
|
||||
// ReadAt reads len(b) bytes from the File starting at byte offset
|
||||
// off. It returns the number of bytes read and the error, if any.
|
||||
// ReadAt always returns a non-nil error when n < len(b). At end of
|
||||
// file, that error is io.EOF.
|
||||
func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
|
||||
if o == nil {
|
||||
return 0, ErrInvalidArgument("Object is nil")
|
||||
}
|
||||
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
// prevErr is error which was saved in previous operation.
|
||||
if o.prevErr != nil || o.isClosed {
|
||||
return 0, o.prevErr
|
||||
}
|
||||
|
||||
// Can only compare offsets to size when size has been set.
|
||||
if o.objectInfoSet {
|
||||
// If offset is negative then we return io.EOF.
|
||||
// If offset is greater than or equal to object size we return io.EOF.
|
||||
if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
}
|
||||
|
||||
// Create the new readAt request.
|
||||
readAtReq := getRequest{
|
||||
isReadOp: true,
|
||||
isReadAt: true,
|
||||
DidOffsetChange: true, // Offset always changes.
|
||||
beenRead: o.beenRead, // Set if this is the first request to try and read.
|
||||
Offset: offset, // Set the offset.
|
||||
Buffer: b,
|
||||
}
|
||||
|
||||
// Alert that this is the first request.
|
||||
if !o.isStarted {
|
||||
readAtReq.isFirstReq = true
|
||||
}
|
||||
|
||||
// Send and receive from the first request.
|
||||
response, err := o.doGetRequest(readAtReq)
|
||||
if err != nil && err != io.EOF {
|
||||
// Save the error.
|
||||
o.prevErr = err
|
||||
return response.Size, err
|
||||
}
|
||||
// Bytes read.
|
||||
bytesRead := int64(response.Size)
|
||||
// There is no valid objectInfo yet
|
||||
// to compare against for EOF.
|
||||
if !o.objectInfoSet {
|
||||
// Update the currentOffset.
|
||||
o.currOffset += bytesRead
|
||||
} else {
|
||||
// If this was not the first request update
|
||||
// the offsets and compare against objectInfo
|
||||
// for EOF.
|
||||
oerr := o.setOffset(bytesRead)
|
||||
if oerr != nil {
|
||||
o.prevErr = oerr
|
||||
return response.Size, oerr
|
||||
}
|
||||
}
|
||||
return response.Size, err
|
||||
}
|
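A short, hedged sketch of a ranged read. It assumes client is a *minio.Client constructed as in the earlier sketches, with placeholder bucket/object names:

obj, err := client.GetObject("mybucket", "data.bin")
if err != nil {
	log.Fatal(err)
}
defer obj.Close()

// ReadAt fetches exactly len(window) bytes, here "Range: bytes=100-131".
window := make([]byte, 32)
n, err := obj.ReadAt(window, 100)
if err != nil && err != io.EOF {
	log.Fatal(err)
}
log.Printf("read %d bytes at offset 100", n)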
||||
|
||||
// Seek sets the offset for the next Read or Write to offset,
|
||||
// interpreted according to whence: 0 means relative to the
|
||||
// origin of the file, 1 means relative to the current offset,
|
||||
// and 2 means relative to the end.
|
||||
// Seek returns the new offset and an error, if any.
|
||||
//
|
||||
// Seeking to a negative offset is an error. Seeking to any positive
|
||||
// offset is legal; subsequent io operations succeed as long as
// the underlying object is not closed.
|
||||
func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
|
||||
if o == nil {
|
||||
return 0, ErrInvalidArgument("Object is nil")
|
||||
}
|
||||
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
if o.prevErr != nil {
|
||||
// Seeking at EOF is legal; allow only io.EOF and return any other saved error.
|
||||
if o.prevErr != io.EOF {
|
||||
return 0, o.prevErr
|
||||
}
|
||||
}
|
||||
|
||||
// Negative offset is valid for whence of '2'.
|
||||
if offset < 0 && whence != 2 {
|
||||
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
|
||||
}
|
||||
|
||||
// This is the first request. So before anything else
|
||||
// get the ObjectInfo.
|
||||
if !o.isStarted || !o.objectInfoSet {
|
||||
// Create the new Seek request.
|
||||
seekReq := getRequest{
|
||||
isReadOp: false,
|
||||
Offset: offset,
|
||||
isFirstReq: true,
|
||||
}
|
||||
// Send and receive from the seek request.
|
||||
_, err := o.doGetRequest(seekReq)
|
||||
if err != nil {
|
||||
// Save the error.
|
||||
o.prevErr = err
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Switch through whence.
|
||||
switch whence {
|
||||
default:
|
||||
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
|
||||
case 0:
|
||||
if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
o.currOffset = offset
|
||||
case 1:
|
||||
if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
o.currOffset += offset
|
||||
case 2:
|
||||
// If we don't know the object size return an error for io.SeekEnd
|
||||
if o.objectInfo.Size < 0 {
|
||||
return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
|
||||
}
|
||||
// Seeking to positive offset is valid for whence '2', but
|
||||
// since we are backing a Reader we have reached 'EOF' if
|
||||
// offset is positive.
|
||||
if offset > 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
// Seeking to negative position not allowed for whence.
|
||||
if o.objectInfo.Size+offset < 0 {
|
||||
return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
|
||||
}
|
||||
o.currOffset = o.objectInfo.Size + offset
|
||||
}
|
||||
// Reset the saved error since we successfully seeked, let the Read
|
||||
// and ReadAt decide.
|
||||
if o.prevErr == io.EOF {
|
||||
o.prevErr = nil
|
||||
}
|
||||
|
||||
// Ask lower level to fetch again from source
|
||||
o.seekData = true
|
||||
|
||||
// Return the effective offset.
|
||||
return o.currOffset, nil
|
||||
}
|
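The whence handling mirrors os.File, including the end-relative case. A hedged sketch reading the last 16 bytes of an object, again assuming a client constructed as in the earlier sketches:

obj, err := client.GetObject("mybucket", "big.log")
if err != nil {
	log.Fatal(err)
}
defer obj.Close()

// Negative offsets are only legal with whence 2 (relative to end).
if _, err := obj.Seek(-16, 2); err != nil {
	log.Fatal(err)
}
tail := make([]byte, 16)
n, err := obj.Read(tail)
if err != nil && err != io.EOF {
	log.Fatal(err)
}
log.Printf("last %d bytes: %q", n, tail[:n])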
||||
|
||||
// Close - closes the object; after the first call, any
// subsequent Close() calls return an error.
|
||||
func (o *Object) Close() (err error) {
|
||||
if o == nil {
|
||||
return ErrInvalidArgument("Object is nil")
|
||||
}
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
// if already closed return an error.
|
||||
if o.isClosed {
|
||||
return o.prevErr
|
||||
}
|
||||
|
||||
// Close successfully.
|
||||
close(o.doneCh)
|
||||
|
||||
// Save for future operations.
|
||||
errMsg := "Object is already closed. Bad file descriptor."
|
||||
o.prevErr = errors.New(errMsg)
|
||||
// Save here that we closed done channel successfully.
|
||||
o.isClosed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// newObject instantiates a new *minio.Object*
|
||||
// ObjectInfo will be set by the first successful request (see doGetRequest).
|
||||
func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
|
||||
return &Object{
|
||||
mutex: &sync.Mutex{},
|
||||
reqCh: reqCh,
|
||||
resCh: resCh,
|
||||
doneCh: doneCh,
|
||||
}
|
||||
}
|
||||
|
||||
// getObject - retrieve object from Object Storage.
|
||||
//
|
||||
// Additionally this function also takes range arguments to download the specified
|
||||
// range bytes of an object. Setting offset and length = 0 will download the full object.
|
||||
//
|
||||
// For more information about the HTTP Range header.
|
||||
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
|
||||
func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
|
||||
// Validate input arguments.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ObjectInfo{}, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ObjectInfo{}, err
|
||||
}
|
||||
|
||||
// Set all the necessary reqHeaders.
|
||||
customHeader := make(http.Header)
|
||||
for key, value := range reqHeaders.Header {
|
||||
customHeader[key] = value
|
||||
}
|
||||
|
||||
// Execute GET on objectName.
|
||||
resp, err := c.executeMethod("GET", requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
customHeader: customHeader,
|
||||
contentSHA256Bytes: emptySHA256,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, ObjectInfo{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
|
||||
// Trim off the odd double quotes from ETag in the beginning and end.
|
||||
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
|
||||
md5sum = strings.TrimSuffix(md5sum, "\"")
|
||||
|
||||
// Parse the date.
|
||||
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
msg := "Last-Modified time format not recognized. " + reportIssue
|
||||
return nil, ObjectInfo{}, ErrorResponse{
|
||||
Code: "InternalError",
|
||||
Message: msg,
|
||||
RequestID: resp.Header.Get("x-amz-request-id"),
|
||||
HostID: resp.Header.Get("x-amz-id-2"),
|
||||
Region: resp.Header.Get("x-amz-bucket-region"),
|
||||
}
|
||||
}
|
||||
|
||||
// Get content-type.
|
||||
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
var objectStat ObjectInfo
|
||||
objectStat.ETag = md5sum
|
||||
objectStat.Key = objectName
|
||||
objectStat.Size = resp.ContentLength
|
||||
objectStat.LastModified = date
|
||||
objectStat.ContentType = contentType
|
||||
|
||||
// do not close body here, caller will close
|
||||
return resp.Body, objectStat, nil
|
||||
}
|
107
vendor/github.com/minio/minio-go/api-get-policy.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio-go/pkg/s3utils"
|
||||
)
|
||||
|
||||
// GetBucketPolicy - get bucket policy at a given path.
|
||||
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return policy.BucketPolicyNone, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
|
||||
return policy.BucketPolicyNone, err
|
||||
}
|
||||
policyInfo, err := c.getBucketPolicy(bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "NoSuchBucketPolicy" {
|
||||
return policy.BucketPolicyNone, nil
|
||||
}
|
||||
return policy.BucketPolicyNone, err
|
||||
}
|
||||
return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
|
||||
}
|
||||
|
||||
// ListBucketPolicies - list all policies for a given prefix and all its children.
|
||||
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return map[string]policy.BucketPolicy{}, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
|
||||
return map[string]policy.BucketPolicy{}, err
|
||||
}
|
||||
policyInfo, err := c.getBucketPolicy(bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "NoSuchBucketPolicy" {
|
||||
return map[string]policy.BucketPolicy{}, nil
|
||||
}
|
||||
return map[string]policy.BucketPolicy{}, err
|
||||
}
|
||||
return policy.GetPolicies(policyInfo.Statements, bucketName), nil
|
||||
}
|
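Note that both helpers above translate a missing policy (NoSuchBucketPolicy) into an empty result rather than an error. A hedged sketch, assuming a client constructed as in the earlier examples and placeholder names:

// BucketPolicyNone is returned when no policy matches the prefix.
p, err := client.GetBucketPolicy("mybucket", "downloads")
if err != nil {
	log.Fatal(err)
}
log.Println("policy on mybucket/downloads:", p)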
||||
|
||||
// Default empty bucket access policy.
|
||||
var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
|
||||
Version: "2012-10-17",
|
||||
}
|
||||
|
||||
// Request server for current bucket policy.
|
||||
func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("policy", "")
|
||||
|
||||
// Execute GET on bucket to fetch the policy.
|
||||
resp, err := c.executeMethod("GET", requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Bytes: emptySHA256,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return emptyBucketAccessPolicy, err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
|
||||
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return emptyBucketAccessPolicy, err
|
||||
}
|
||||
|
||||
policy := policy.BucketAccessPolicy{}
|
||||
err = json.Unmarshal(bucketPolicyBuf, &policy)
|
||||
return policy, err
|
||||
}
|
715
vendor/github.com/minio/minio-go/api-list.go
generated
vendored
Normal file
|
@ -0,0 +1,715 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/minio/minio-go/pkg/s3utils"
)

// ListBuckets lists all buckets owned by this authenticated user.
//
// This call requires explicit authentication, no anonymous requests are
// allowed for listing buckets.
//
//   api := client.New(....)
//   buckets, err := api.ListBuckets()
//   if err != nil {
//       fmt.Println(err)
//       return
//   }
//   for _, bucket := range buckets {
//       fmt.Println(bucket)
//   }
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
	// Execute GET on service.
	resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256})
	defer closeResponse(resp)
	if err != nil {
		return nil, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return nil, httpRespToErrorResponse(resp, "", "")
		}
	}
	listAllMyBucketsResult := listAllMyBucketsResult{}
	err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
	if err != nil {
		return nil, err
	}
	return listAllMyBucketsResult.Buckets.Bucket, nil
}

/// Bucket Read Operations.

// ListObjectsV2 lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
//   api := client.New(....)
//   // Create a done channel.
//   doneCh := make(chan struct{})
//   defer close(doneCh)
//   // Recursively list all objects in 'mytestbucket'
//   recursive := true
//   for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
//       fmt.Println(message)
//   }
//
func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// Return object owner information by default
	fetchOwner := true

	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer close(objectStatCh)
		// Save continuationToken for next request.
		var continuationToken string
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
			if err != nil {
				objectStatCh <- ObjectInfo{
					Err: err,
				}
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case objectStatCh <- ObjectInfo{
					Key:  obj.Prefix,
					Size: 0,
				}:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}

			// If continuation token present, save it for next request.
			if result.NextContinuationToken != "" {
				continuationToken = result.NextContinuationToken
			}

			// Listing ends when result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}
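
// Editorial sketch (not part of the vendored source): stopping a listing
// early by letting the deferred close of doneCh terminate the internal
// goroutine once enough objects have been seen. Names are placeholders;
// `api` is an initialized Client and "fmt" is assumed imported.
func exampleListFirstN(api Client, n int) {
	doneCh := make(chan struct{})
	defer close(doneCh) // closing doneCh stops the listing goroutine
	count := 0
	for object := range api.ListObjectsV2("mytestbucket", "", true, doneCh) {
		if object.Err != nil {
			fmt.Println(object.Err)
			return
		}
		fmt.Println(object.Key)
		count++
		if count == n {
			return // deferred close(doneCh) ends the internal goroutine
		}
	}
}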

// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListBucketV2Result{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return ListBucketV2Result{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	// Always set list-type in ListObjects V2
	urlValues.Set("list-type", "2")

	// Set object prefix.
	if objectPrefix != "" {
		urlValues.Set("prefix", objectPrefix)
	}
	// Set continuation token.
	if continuationToken != "" {
		urlValues.Set("continuation-token", continuationToken)
	}
	// Set delimiter.
	if delimiter != "" {
		urlValues.Set("delimiter", delimiter)
	}

	// Fetch owner when listing.
	if fetchOwner {
		urlValues.Set("fetch-owner", "true")
	}

	// maxkeys should default to 1000 or less.
	if maxkeys == 0 || maxkeys > 1000 {
		maxkeys = 1000
	}
	// Set max keys.
	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListBucketV2Result{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Decode ListBucketV2Result XML.
	listBucketResult := ListBucketV2Result{}
	if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
		return listBucketResult, err
	}

	// This is an additional verification check to make
	// sure proper responses are received.
	if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
		return listBucketResult, errors.New("Truncated response should have continuation token set")
	}

	// Success.
	return listBucketResult, nil
}

// ListObjects - (List Objects) - List some objects or all recursively.
//
// ListObjects lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
//   api := client.New(....)
//   // Create a done channel.
//   doneCh := make(chan struct{})
//   defer close(doneCh)
//   // Recursively list all objects in 'mytestbucket'
//   recursive := true
//   for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
//       fmt.Println(message)
//   }
//
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}
	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer close(objectStatCh)
		// Save marker for next request.
		var marker string
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
			if err != nil {
				objectStatCh <- ObjectInfo{
					Err: err,
				}
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				// Save the marker.
				marker = object.Key
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				object := ObjectInfo{}
				object.Key = obj.Prefix
				object.Size = 0
				select {
				// Send object prefixes.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}

			// If next marker present, save it for next request.
			if result.NextMarker != "" {
				marker = result.NextMarker
			}

			// Listing ends when result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}

// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?marker - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListBucketResult{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return ListBucketResult{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	// Set object prefix.
	if objectPrefix != "" {
		urlValues.Set("prefix", objectPrefix)
	}
	// Set object marker.
	if objectMarker != "" {
		urlValues.Set("marker", objectMarker)
	}
	// Set delimiter.
	if delimiter != "" {
		urlValues.Set("delimiter", delimiter)
	}

	// maxkeys should default to 1000 or less.
	if maxkeys == 0 || maxkeys > 1000 {
		maxkeys = 1000
	}
	// Set max keys.
	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListBucketResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	// Decode ListBucketResult XML.
	listBucketResult := ListBucketResult{}
	err = xmlDecoder(resp.Body, &listBucketResult)
	if err != nil {
		return listBucketResult, err
	}
	return listBucketResult, nil
}

// ListIncompleteUploads - List incompletely uploaded multipart objects.
//
// ListIncompleteUploads lists all incomplete objects matching the
// objectPrefix from the specified bucket. If recursion is enabled
// it would list all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel to pro-actively close the internal go routine.
// If you enable recursive as 'true' this function will return back all
// the multipart objects in a given bucket name.
//
//   api := client.New(....)
//   // Create a done channel.
//   doneCh := make(chan struct{})
//   defer close(doneCh)
//   // Recursively list all incomplete uploads in 'mytestbucket'
//   recursive := true
//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
//       fmt.Println(message)
//   }
//
func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
	// Turn on size aggregation of individual parts.
	isAggregateSize := true
	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
}
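
// Editorial sketch (not part of the vendored source): summing the bytes
// already uploaded across all incomplete multipart uploads under a prefix.
// Bucket and prefix names are placeholders and "fmt" is assumed imported.
func exampleIncompleteTotal(api Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)
	var total int64
	for upload := range api.ListIncompleteUploads("mytestbucket", "backups/", true, doneCh) {
		if upload.Err != nil {
			fmt.Println(upload.Err)
			return
		}
		// Size is the aggregated part size, see isAggregateSize above.
		total += upload.Size
	}
	fmt.Println("incomplete upload bytes:", total)
}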

// listIncompleteUploads lists all incomplete uploads.
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
	// Allocate channel for multipart uploads.
	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
	// Delimiter is set to "/" by default.
	delimiter := "/"
	if recursive {
		// If recursive do not delimit.
		delimiter = ""
	}
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectMultipartStatCh)
		objectMultipartStatCh <- ObjectMultipartInfo{
			Err: err,
		}
		return objectMultipartStatCh
	}
	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectMultipartStatCh)
		objectMultipartStatCh <- ObjectMultipartInfo{
			Err: err,
		}
		return objectMultipartStatCh
	}
	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
		defer close(objectMultipartStatCh)
		// object and upload ID marker for future requests.
		var objectMarker string
		var uploadIDMarker string
		for {
			// list all multipart uploads.
			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
			if err != nil {
				objectMultipartStatCh <- ObjectMultipartInfo{
					Err: err,
				}
				return
			}
			// Save objectMarker and uploadIDMarker for next request.
			objectMarker = result.NextKeyMarker
			uploadIDMarker = result.NextUploadIDMarker
			// Send all multipart uploads.
			for _, obj := range result.Uploads {
				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
				if aggregateSize {
					// Get total multipart size.
					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
					if err != nil {
						objectMultipartStatCh <- ObjectMultipartInfo{
							Err: err,
						}
						continue
					}
				}
				select {
				// Send individual uploads here.
				case objectMultipartStatCh <- obj:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}
			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				object := ObjectMultipartInfo{}
				object.Key = obj.Prefix
				object.Size = 0
				select {
				// Send delimited prefixes here.
				case objectMultipartStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}
			// Listing ends if result not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectMultipartStatCh)
	// return.
	return objectMultipartStatCh
}

// listMultipartUploadsQuery - (List Multipart Uploads).
// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set uploads.
	urlValues.Set("uploads", "")
	// Set object key marker.
	if keyMarker != "" {
		urlValues.Set("key-marker", keyMarker)
	}
	// Set upload id marker.
	if uploadIDMarker != "" {
		urlValues.Set("upload-id-marker", uploadIDMarker)
	}
	// Set prefix marker.
	if prefix != "" {
		urlValues.Set("prefix", prefix)
	}
	// Set delimiter.
	if delimiter != "" {
		urlValues.Set("delimiter", delimiter)
	}

	// maxUploads should be 1000 or less.
	if maxUploads == 0 || maxUploads > 1000 {
		maxUploads = 1000
	}
	// Set max-uploads.
	urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))

	// Execute GET on bucketName to list multipart uploads.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListMultipartUploadsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	// Decode response body.
	listMultipartUploadsResult := ListMultipartUploadsResult{}
	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
	if err != nil {
		return listMultipartUploadsResult, err
	}
	return listMultipartUploadsResult, nil
}

// listObjectParts lists all object parts recursively.
func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
	// Part number marker for the next batch of requests.
	var nextPartNumberMarker int
	partsInfo = make(map[int]ObjectPart)
	for {
		// Get list of uploaded parts a maximum of 1000 per request.
		listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
		if err != nil {
			return nil, err
		}
		// Append to parts info.
		for _, part := range listObjPartsResult.ObjectParts {
			// Trim off the odd double quotes from ETag in the beginning and end.
			part.ETag = strings.TrimPrefix(part.ETag, "\"")
			part.ETag = strings.TrimSuffix(part.ETag, "\"")
			partsInfo[part.PartNumber] = part
		}
		// Keep part number marker, for the next iteration.
		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
		// Listing ends when result is not truncated, return right here.
		if !listObjPartsResult.IsTruncated {
			break
		}
	}

	// Return all the parts.
	return partsInfo, nil
}

// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
	// Make list incomplete uploads recursive.
	isRecursive := true
	// Turn off size aggregation of individual parts, in this request.
	isAggregateSize := false
	// latestUpload to track the latest multipart info for objectName.
	var latestUpload ObjectMultipartInfo
	// Create done channel to cleanup the routine.
	doneCh := make(chan struct{})
	defer close(doneCh)
	// List all incomplete uploads.
	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
		if mpUpload.Err != nil {
			return "", mpUpload.Err
		}
		if objectName == mpUpload.Key {
			if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
				latestUpload = mpUpload
			}
		}
	}
	// Return the latest upload id.
	return latestUpload.UploadID, nil
}

// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
	// Iterate over all parts and aggregate the size.
	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
	if err != nil {
		return 0, err
	}
	for _, partInfo := range partsInfo {
		size += partInfo.Size
	}
	return size, nil
}

// listObjectPartsQuery (List Parts query)
//     - lists some or all (up to 1000) parts that have been uploaded
//       for a specific multipart upload
//
// You can use the request parameters as selection criteria to return
// a subset of the uploads in a bucket, request parameters :-
// ---------
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set part number marker.
	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
	// Set upload id.
	urlValues.Set("uploadId", uploadID)

	// maxParts should be 1000 or less.
	if maxParts == 0 || maxParts > 1000 {
		maxParts = 1000
	}
	// Set max parts.
	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))

	// Execute GET on objectName to get list of parts.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:         bucketName,
		objectName:         objectName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListObjectPartsResult{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Decode list object parts XML.
	listObjectPartsResult := ListObjectPartsResult{}
	err = xmlDecoder(resp.Body, &listObjectPartsResult)
	if err != nil {
		return listObjectPartsResult, err
	}
	return listObjectPartsResult, nil
}
225
vendor/github.com/minio/minio-go/api-notification.go
generated
vendored
Normal file
@@ -0,0 +1,225 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bufio"
	"encoding/json"
	"io"
	"net/http"
	"net/url"
	"time"

	"github.com/minio/minio-go/pkg/s3utils"
)

// GetBucketNotification - get bucket notification at a given path.
func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return BucketNotification{}, err
	}
	notification, err := c.getBucketNotification(bucketName)
	if err != nil {
		return BucketNotification{}, err
	}
	return notification, nil
}

// Request server for notification rules.
func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
	urlValues := make(url.Values)
	urlValues.Set("notification", "")

	// Execute GET on bucket to fetch the notification configuration.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})

	defer closeResponse(resp)
	if err != nil {
		return BucketNotification{}, err
	}
	return processBucketNotificationResponse(bucketName, resp)
}

// processes the GetNotification http response from the server.
func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
	if resp.StatusCode != http.StatusOK {
		errResponse := httpRespToErrorResponse(resp, bucketName, "")
		return BucketNotification{}, errResponse
	}
	var bucketNotification BucketNotification
	err := xmlDecoder(resp.Body, &bucketNotification)
	if err != nil {
		return BucketNotification{}, err
	}
	return bucketNotification, nil
}

// identity represents the user id, this is a compliance field.
type identity struct {
	PrincipalID string `json:"principalId"`
}

// Notification event bucket metadata.
type bucketMeta struct {
	Name          string   `json:"name"`
	OwnerIdentity identity `json:"ownerIdentity"`
	ARN           string   `json:"arn"`
}

// Notification event object metadata.
type objectMeta struct {
	Key       string `json:"key"`
	Size      int64  `json:"size,omitempty"`
	ETag      string `json:"eTag,omitempty"`
	VersionID string `json:"versionId,omitempty"`
	Sequencer string `json:"sequencer"`
}

// Notification event server specific metadata.
type eventMeta struct {
	SchemaVersion   string     `json:"s3SchemaVersion"`
	ConfigurationID string     `json:"configurationId"`
	Bucket          bucketMeta `json:"bucket"`
	Object          objectMeta `json:"object"`
}

// sourceInfo represents information on the client that
// triggered the event notification.
type sourceInfo struct {
	Host      string `json:"host"`
	Port      string `json:"port"`
	UserAgent string `json:"userAgent"`
}

// NotificationEvent represents an Amazon S3 bucket notification event.
type NotificationEvent struct {
	EventVersion      string            `json:"eventVersion"`
	EventSource       string            `json:"eventSource"`
	AwsRegion         string            `json:"awsRegion"`
	EventTime         string            `json:"eventTime"`
	EventName         string            `json:"eventName"`
	UserIdentity      identity          `json:"userIdentity"`
	RequestParameters map[string]string `json:"requestParameters"`
	ResponseElements  map[string]string `json:"responseElements"`
	S3                eventMeta         `json:"s3"`
	Source            sourceInfo        `json:"source"`
}

// NotificationInfo - represents the collection of notification events, additionally
// also reports errors if any while listening on bucket notifications.
type NotificationInfo struct {
	Records []NotificationEvent
	Err     error
}

// ListenBucketNotification - listen on bucket notifications.
func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
	notificationInfoCh := make(chan NotificationInfo, 1)
	// On success, start a routine to read the response line by line.
	go func(notificationInfoCh chan<- NotificationInfo) {
		defer close(notificationInfoCh)

		// Validate the bucket name.
		if err := s3utils.CheckValidBucketName(bucketName); err != nil {
			notificationInfoCh <- NotificationInfo{
				Err: err,
			}
			return
		}

		// Check ARN partition to verify if listening bucket is supported.
		if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
			notificationInfoCh <- NotificationInfo{
				Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
			}
			return
		}

		// Continuously run and listen on bucket notification.
		// Create a done channel to control the retry go routine below.
		retryDoneCh := make(chan struct{}, 1)

		// Indicate to our routine to exit cleanly upon return.
		defer close(retryDoneCh)

		// Wait on the jitter retry loop.
		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
			urlValues := make(url.Values)
			urlValues.Set("prefix", prefix)
			urlValues.Set("suffix", suffix)
			urlValues["events"] = events

			// Execute GET on bucket to listen on its notifications.
			resp, err := c.executeMethod("GET", requestMetadata{
				bucketName:         bucketName,
				queryValues:        urlValues,
				contentSHA256Bytes: emptySHA256,
			})
			if err != nil {
				continue
			}

			// Validate http response, upon error return quickly.
			if resp.StatusCode != http.StatusOK {
				errResponse := httpRespToErrorResponse(resp, bucketName, "")
				notificationInfoCh <- NotificationInfo{
					Err: errResponse,
				}
				return
			}

			// Initialize a new bufio scanner, to read line by line.
			bio := bufio.NewScanner(resp.Body)

			// Close the response body.
			defer resp.Body.Close()

			// Unmarshal each line into a NotificationInfo value.
			for bio.Scan() {
				var notificationInfo NotificationInfo
				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
					continue
				}
				// Send notifications on channel only if there are events received.
				if len(notificationInfo.Records) > 0 {
					select {
					case notificationInfoCh <- notificationInfo:
					case <-doneCh:
						return
					}
				}
			}
			// Look for any underlying errors.
			if err = bio.Err(); err != nil {
				// For an unexpected connection drop from server, we close the body
				// and re-connect.
				if err == io.ErrUnexpectedEOF {
					resp.Body.Close()
				}
			}
		}
	}(notificationInfoCh)

	// Returns the notification info channel, for caller to start reading from.
	return notificationInfoCh
}
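
// Editorial sketch (not part of the vendored source): listening for object
// created/removed events. The event names follow the standard S3 event
// naming convention; bucket name and filters are placeholders and "fmt" is
// assumed imported.
func exampleListen(api Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)
	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range api.ListenBucketNotification("mytestbucket", "photos/", ".jpg", events, doneCh) {
		if info.Err != nil {
			fmt.Println(info.Err)
			return
		}
		for _, event := range info.Records {
			fmt.Println("event:", event.EventName, "key:", event.S3.Object.Key)
		}
	}
}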
211
vendor/github.com/minio/minio-go/api-presigned.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"errors"
	"net/url"
	"time"

	"github.com/minio/minio-go/pkg/s3signer"
	"github.com/minio/minio-go/pkg/s3utils"
)

// supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]struct{}{
	"response-expires":             {},
	"response-content-type":        {},
	"response-cache-control":       {},
	"response-content-language":    {},
	"response-content-encoding":    {},
	"response-content-disposition": {},
}

// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7 days, i.e. 604800 seconds, and minimum is 1 second.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	// Input validation.
	if method == "" {
		return nil, ErrInvalidArgument("method cannot be empty.")
	}
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}
	if err := isValidExpiry(expires); err != nil {
		return nil, err
	}

	// Convert expires into seconds.
	expireSeconds := int64(expires / time.Second)
	reqMetadata := requestMetadata{
		presignURL: true,
		bucketName: bucketName,
		objectName: objectName,
		expires:    expireSeconds,
	}

	// For "GET" we are handling additional request parameters to
	// override its response headers.
	if method == "GET" {
		// Verify if input map has unsupported params, if yes exit.
		for k := range reqParams {
			if _, ok := supportedGetReqParams[k]; !ok {
				return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
			}
		}
		// Save the request parameters to be used in presigning for GET request.
		reqMetadata.queryValues = reqParams
	}

	// Instantiate a new request.
	// Since expires is set, newRequest will presign the request.
	req, err := c.newRequest(method, reqMetadata)
	if err != nil {
		return nil, err
	}
	return req.URL, nil
}

// PresignedGetObject - Returns a presigned URL to access an object
// without credentials. Expires maximum is 7 days, i.e. 604800 seconds,
// and minimum is 1 second. Additionally you can override a set of response
// headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}

// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
// Expires maximum is 7 days, i.e. 604800 seconds, and minimum is 1 second.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
	return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
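
// Editorial sketch (not part of the vendored source): producing a 7-day
// presigned download URL that forces a download filename, plus a 1-hour
// presigned upload URL. Names are placeholders; "fmt", "net/url" and "time"
// are assumed imported.
func examplePresign(api Client) {
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", "attachment; filename=\"report.csv\"")
	getURL, err := api.PresignedGetObject("mytestbucket", "report.csv", 7*24*time.Hour, reqParams)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("GET:", getURL)

	putURL, err := api.PresignedPutObject("mytestbucket", "upload.bin", time.Hour)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("PUT:", putURL)
}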

// PresignedPostPolicy - Returns POST URL and form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
	// Validate input arguments.
	if p.expiration.IsZero() {
		return nil, nil, errors.New("Expiration time must be specified")
	}
	if _, ok := p.formData["key"]; !ok {
		return nil, nil, errors.New("object key must be specified")
	}
	if _, ok := p.formData["bucket"]; !ok {
		return nil, nil, errors.New("bucket name must be specified")
	}

	bucketName := p.formData["bucket"]
	// Fetch the bucket location.
	location, err := c.getBucketLocation(bucketName)
	if err != nil {
		return nil, nil, err
	}

	u, err = c.makeTargetURL(bucketName, "", location, nil)
	if err != nil {
		return nil, nil, err
	}

	// Get credentials from the configured credentials provider.
	credValues, err := c.credsProvider.Get()
	if err != nil {
		return nil, nil, err
	}

	var (
		signerType      = credValues.SignerType
		sessionToken    = credValues.SessionToken
		accessKeyID     = credValues.AccessKeyID
		secretAccessKey = credValues.SecretAccessKey
	)

	if signerType.IsAnonymous() {
		return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
	}

	// Keep time.
	t := time.Now().UTC()
	// For signature version '2' handle here.
	if signerType.IsV2() {
		policyBase64 := p.base64()
		p.formData["policy"] = policyBase64
		// For Google endpoint set this value to be 'GoogleAccessId'.
		if s3utils.IsGoogleEndpoint(c.endpointURL) {
			p.formData["GoogleAccessId"] = accessKeyID
		} else {
			// For all other endpoints set this value to be 'AWSAccessKeyId'.
			p.formData["AWSAccessKeyId"] = accessKeyID
		}
		// Sign the policy.
		p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
		return u, p.formData, nil
	}

	// Add date policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-date",
		value:     t.Format(iso8601DateFormat),
	}); err != nil {
		return nil, nil, err
	}

	// Add algorithm policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-algorithm",
		value:     signV4Algorithm,
	}); err != nil {
		return nil, nil, err
	}

	// Add a credential policy.
	credential := s3signer.GetCredential(accessKeyID, location, t)
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-credential",
		value:     credential,
	}); err != nil {
		return nil, nil, err
	}

	if sessionToken != "" {
		if err = p.addNewPolicy(policyCondition{
			matchType: "eq",
			condition: "$x-amz-security-token",
			value:     sessionToken,
		}); err != nil {
			return nil, nil, err
		}
	}

	// Get base64 encoded policy.
	policyBase64 := p.base64()

	// Fill in the form data.
	p.formData["policy"] = policyBase64
	p.formData["x-amz-algorithm"] = signV4Algorithm
	p.formData["x-amz-credential"] = credential
	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
	if sessionToken != "" {
		p.formData["x-amz-security-token"] = sessionToken
	}
	p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
	return u, p.formData, nil
}
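
// Editorial sketch (not part of the vendored source): building a POST policy
// for a browser upload. NewPostPolicy, SetBucket, SetKey and SetExpires are
// assumed to be the PostPolicy helpers this version of minio-go exposes; their
// error returns are elided here for brevity, and names/expiry are placeholders
// with "fmt" and "time" assumed imported.
func examplePostPolicy(api Client) {
	p := NewPostPolicy()
	p.SetBucket("mytestbucket")
	p.SetKey("uploads/photo.jpg")
	p.SetExpires(time.Now().UTC().Add(10 * time.Minute))
	u, formData, err := api.PresignedPostPolicy(p)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The returned URL and form fields go into an HTML form POST.
	fmt.Println("POST to:", u)
	for k, v := range formData {
		fmt.Println("form field:", k, "=", v)
	}
}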
254
vendor/github.com/minio/minio-go/api-put-bucket.go
generated
vendored
Normal file
@@ -0,0 +1,254 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"

	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio-go/pkg/s3utils"
)

/// Bucket operations

// MakeBucket creates a new bucket with bucketName.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3, supported regions are listed at http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage, supported regions are listed at https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, location string) (err error) {
	defer func() {
		// Save the location into cache on a successful makeBucket response.
		if err == nil {
			c.bucketLocCache.Set(bucketName, location)
		}
	}()

	// Validate the input arguments.
	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
		return err
	}

	// If location is empty, treat it as the default region 'us-east-1'.
	if location == "" {
		location = "us-east-1"
		// For custom region clients, default
		// to the custom region instead of 'us-east-1'.
		if c.region != "" {
			location = c.region
		}
	}
	// PUT bucket request metadata.
	reqMetadata := requestMetadata{
		bucketName:     bucketName,
		bucketLocation: location,
	}

	// If location is not 'us-east-1' create bucket location config.
	if location != "us-east-1" && location != "" {
		createBucketConfig := createBucketConfiguration{}
		createBucketConfig.Location = location
		var createBucketConfigBytes []byte
		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
		if err != nil {
			return err
		}
		reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
		reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
	}

	// Execute PUT to create a new bucket.
	resp, err := c.executeMethod("PUT", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}

	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Success.
	return nil
}
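
// Editorial sketch (not part of the vendored source): creating a bucket in a
// specific region and treating repeat creation by the same owner as success.
// "BucketAlreadyOwnedByYou" is the standard S3 error code for that case;
// bucket/region names are placeholders and "fmt" is assumed imported.
func exampleMakeBucket(api Client) {
	err := api.MakeBucket("mytestbucket", "us-west-2")
	if err != nil {
		if ToErrorResponse(err).Code == "BucketAlreadyOwnedByYou" {
			fmt.Println("bucket already exists and is owned by us")
			return
		}
		fmt.Println(err)
		return
	}
	fmt.Println("bucket created")
}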

// SetBucketPolicy sets the access permissions on an existing bucket.
//
// For example
//
//  none - owner gets full access [default].
//  readonly - anonymous get access for everyone at a given object prefix.
//  readwrite - anonymous list/put/delete access to a given object prefix.
//  writeonly - anonymous put/delete access to a given object prefix.
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return err
	}

	if !bucketPolicy.IsValidBucketPolicy() {
		return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
	}

	policyInfo, err := c.getBucketPolicy(bucketName)
	errResponse := ToErrorResponse(err)
	if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
		return err
	}

	if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
		// As the request is for removing policy and the bucket
		// has empty policy statements, just return success.
		return nil
	}

	policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)

	// Save the updated policies.
	return c.putBucketPolicy(bucketName, policyInfo)
}
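
// Editorial sketch (not part of the vendored source): granting anonymous
// read access to everything under the "downloads" prefix.
// policy.BucketPolicyReadOnly comes from the policy package imported above;
// names are placeholders and "fmt" is assumed imported.
func exampleSetBucketPolicy(api Client) {
	err := api.SetBucketPolicy("mytestbucket", "downloads", policy.BucketPolicyReadOnly)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("prefix is now world-readable")
}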

// Saves a new bucket policy.
func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// If there are no policy statements, we should remove entire policy.
	if len(policyInfo.Statements) == 0 {
		return c.removeBucketPolicy(bucketName)
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("policy", "")

	policyBytes, err := json.Marshal(&policyInfo)
	if err != nil {
		return err
	}

	policyBuffer := bytes.NewReader(policyBytes)
	reqMetadata := requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentBody:        policyBuffer,
		contentLength:      int64(len(policyBytes)),
		contentMD5Bytes:    sumMD5(policyBytes),
		contentSHA256Bytes: sum256(policyBytes),
	}

	// Execute PUT to upload a new bucket policy.
	resp, err := c.executeMethod("PUT", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusNoContent {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return nil
}

// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(bucketName string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("policy", "")

	// Execute DELETE on bucket to remove its policy.
	resp, err := c.executeMethod("DELETE", requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	return nil
}

// SetBucketNotification saves a new bucket notification.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("notification", "")

	notifBytes, err := xml.Marshal(bucketNotification)
	if err != nil {
		return err
	}

	notifBuffer := bytes.NewReader(notifBytes)
	reqMetadata := requestMetadata{
		bucketName:         bucketName,
		queryValues:        urlValues,
		contentBody:        notifBuffer,
		contentLength:      int64(len(notifBytes)),
		contentMD5Bytes:    sumMD5(notifBytes),
		contentSHA256Bytes: sum256(notifBytes),
	}

	// Execute PUT to upload a new bucket notification.
	resp, err := c.executeMethod("PUT", reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return nil
}

// RemoveAllBucketNotification - Removes all bucket notifications; clears any previously specified config.
func (c Client) RemoveAllBucketNotification(bucketName string) error {
	return c.SetBucketNotification(bucketName, BucketNotification{})
}
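
// Editorial sketch (not part of the vendored source): clearing every
// notification rule on a bucket, which simply PUTs an empty configuration
// as shown above. Bucket name is a placeholder; "fmt" assumed imported.
func exampleClearNotifications(api Client) {
	if err := api.RemoveAllBucketNotification("mytestbucket"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("all notification configs removed")
}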
118
vendor/github.com/minio/minio-go/api-put-object-common.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"hash"
	"io"
	"math"
	"os"

	"github.com/minio/minio-go/pkg/s3utils"
)

// Verify if reader is *os.File
func isFile(reader io.Reader) (ok bool) {
	_, ok = reader.(*os.File)
	return
}

// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
	_, ok = reader.(*Object)
	return
}

// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
	_, ok = reader.(io.ReaderAt)
	return
}

// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
// object storage it will have the following parameters as constants.
//
//  maxPartsCount - 10000
//  minPartSize - 64MiB
//  maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
	// If object size is '-1', set it to 5TiB.
	if objectSize == -1 {
		objectSize = maxMultipartPutObjectSize
	}
	// Error out if object size is larger than the supported maximum.
	if objectSize > maxMultipartPutObjectSize {
		err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
		return
	}
	// Use floats for part size for all calculations to avoid
	// overflows during float64 to int64 conversions.
	// Divide in floating point so the ceiling is not lost to integer truncation.
	partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
	partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
	// Total parts count.
	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
	// Part size.
	partSize = int64(partSizeFlt)
	// Last part size.
	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
	return totalPartsCount, partSize, lastPartSize, nil
}
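
// Editorial worked example (not part of the vendored source): with the
// constants named above (maxPartsCount 10000, minPartSize 64MiB), a 1TiB
// object gives ceil(1TiB/10000) ≈ 105MiB, rounded up to the next 64MiB
// multiple = 128MiB parts; that is 8192 parts of 128MiB with a final part
// also of 128MiB. A quick self-check under those assumptions:
func exampleOptimalPartInfo() {
	const oneTiB = int64(1) << 40
	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(oneTiB)
	if err != nil {
		return
	}
	// Expect: totalPartsCount 8192, partSize 128MiB, lastPartSize 128MiB.
	_, _, _ = totalPartsCount, partSize, lastPartSize
}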

// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
	hashWriter := writer
	for _, v := range hashAlgorithms {
		hashWriter = io.MultiWriter(hashWriter, v)
	}

	// Copy partSize bytes from reader into the hash writer.
	size, err = io.CopyN(hashWriter, reader, partSize)
	if err != nil {
		// If not EOF return error right here.
		if err != io.EOF {
			return 0, err
		}
	}

	for k, v := range hashAlgorithms {
		hashSums[k] = v.Sum(nil)
	}
	return size, err
}

// newUploadID - initiates a new multipart request for an object name
// and fetches a fresh upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return "", err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return "", err
	}

	// Initiate multipart upload for an object.
	initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
	if err != nil {
		return "", err
	}
	return initMultipartUploadResult.UploadID, nil
}
22
vendor/github.com/minio/minio-go/api-put-object-copy.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

// CopyObject - copy a source object into a new object
func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
    return c.ComposeObject(dst, []SourceInfo{src})
}
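
// Example usage (editor's sketch, not part of this file; assumes the
// NewSourceInfo/NewDestinationInfo constructors that this version of
// minio-go exposes alongside ComposeObject):
//
//	src := minio.NewSourceInfo("my-bucket", "my-object", nil)
//	dst, err := minio.NewDestinationInfo("my-bucket", "my-object-copy", nil, nil)
//	if err == nil {
//		err = client.CopyObject(dst, src)
//	}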
46
vendor/github.com/minio/minio-go/api-put-object-encrypted.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "io"

    "github.com/minio/minio-go/pkg/encrypt"
)

// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {

    if encryptMaterials == nil {
        return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
    }

    if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
        return 0, err
    }

    if metadata == nil {
        metadata = make(map[string][]string)
    }

    // Set the necessary encryption headers, for future decryption.
    metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
    metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
    metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}

    return c.putObjectMultipart(bucketName, objectName, encryptMaterials, -1, metadata, progress)
}
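
// Example usage (editor's sketch, not part of this file; assumes the
// symmetric-key helpers NewSymmetricKey and NewCBCSecureMaterials from
// github.com/minio/minio-go/pkg/encrypt; the key material shown is
// hypothetical):
//
//	key := encrypt.NewSymmetricKey([]byte("my-32-byte-long-secret-key......"))
//	materials, err := encrypt.NewCBCSecureMaterials(key)
//	if err == nil {
//		n, err = client.PutEncryptedObject("my-bucket", "my-object", file, materials, nil, nil)
//	}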
66
vendor/github.com/minio/minio-go/api-put-object-file.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "mime"
    "os"
    "path/filepath"

    "github.com/minio/minio-go/pkg/s3utils"
)

// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return 0, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return 0, err
    }

    // Open the referenced file.
    fileReader, err := os.Open(filePath)
    // Fail quickly on any error.
    if err != nil {
        return 0, err
    }
    defer fileReader.Close()

    // Save the file stat.
    fileStat, err := fileReader.Stat()
    if err != nil {
        return 0, err
    }

    // Save the file size.
    fileSize := fileStat.Size()

    objMetadata := make(map[string][]string)

    // Set contentType based on the filepath extension if not given, or fall
    // back to "application/octet-stream" if the extension has no associated type.
    if contentType == "" {
        if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
            contentType = "application/octet-stream"
        }
    }

    objMetadata["Content-Type"] = []string{contentType}
    return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
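
// Example usage (editor's sketch, not part of this file):
//
//	n, err := client.FPutObject("my-bucket", "backup.tar.gz", "/tmp/backup.tar.gz", "application/gzip")
//	// n holds the number of bytes uploaded; passing "" as contentType
//	// would let the library derive it from the file extension instead.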
372
vendor/github.com/minio/minio-go/api-put-object-multipart.go
generated
vendored
Normal file
@@ -0,0 +1,372 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "bytes"
    "encoding/xml"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "sort"
    "strconv"
    "strings"

    "github.com/minio/minio-go/pkg/s3utils"
)

func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
    metadata map[string][]string, progress io.Reader) (n int64, err error) {
    n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, size, metadata, progress)
    if err != nil {
        errResp := ToErrorResponse(err)
        // If multipart functionality is not available, fall back
        // to a single PutObject operation.
        if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
            // Verify if size of reader is greater than '5GiB'.
            if size > maxSinglePutObjectSize {
                return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
            }
            // Fall back to uploading as single PutObject operation.
            return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
        }
    }
    return n, err
}

func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, size int64,
    metadata map[string][]string, progress io.Reader) (n int64, err error) {
    // Input validation.
    if err = s3utils.CheckValidBucketName(bucketName); err != nil {
        return 0, err
    }
    if err = s3utils.CheckValidObjectName(objectName); err != nil {
        return 0, err
    }

    // Total data read and written to server; should be equal to
    // 'size' at the end of the call.
    var totalUploadedSize int64

    // Complete multipart upload.
    var complMultipartUpload completeMultipartUpload

    // Calculate the optimal parts info for a given size.
    totalPartsCount, partSize, _, err := optimalPartInfo(size)
    if err != nil {
        return 0, err
    }

    // Initiate a new multipart upload.
    uploadID, err := c.newUploadID(bucketName, objectName, metadata)
    if err != nil {
        return 0, err
    }

    defer func() {
        if err != nil {
            c.abortMultipartUpload(bucketName, objectName, uploadID)
        }
    }()

    // Part number always starts with '1'.
    partNumber := 1

    // Initialize a temporary buffer.
    tmpBuffer := new(bytes.Buffer)

    // Initialize parts uploaded map.
    partsInfo := make(map[int]ObjectPart)

    for partNumber <= totalPartsCount {
        // Choose hash algorithms to be calculated by hashCopyN,
        // avoid sha256 with non-v4 signature request or
        // HTTPS connection.
        hashAlgos, hashSums := c.hashMaterials()

        // Calculates hash sums while copying partSize bytes into tmpBuffer.
        prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
        if rErr != nil && rErr != io.EOF {
            return 0, rErr
        }

        var reader io.Reader
        // Update progress reader appropriately to the latest offset
        // as we read from the source.
        reader = newHook(tmpBuffer, progress)

        // Proceed to upload the part.
        var objPart ObjectPart
        objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
            hashSums["md5"], hashSums["sha256"], prtSize, metadata)
        if err != nil {
            // Reset the temporary buffer upon any error.
            tmpBuffer.Reset()
            return totalUploadedSize, err
        }

        // Save successfully uploaded part metadata.
        partsInfo[partNumber] = objPart

        // Reset the temporary buffer.
        tmpBuffer.Reset()

        // Save successfully uploaded size.
        totalUploadedSize += prtSize

        // Increment part number.
        partNumber++

        // For unknown size, break away once we read an EOF.
        // We do not have to upload till totalPartsCount.
        if size < 0 && rErr == io.EOF {
            break
        }
    }

    // Verify if we uploaded all the data.
    if size > 0 {
        if totalUploadedSize != size {
            return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
        }
    }

    // Loop over total uploaded parts to save them in
    // Parts array before completing the multipart request.
    for i := 1; i < partNumber; i++ {
        part, ok := partsInfo[i]
        if !ok {
            return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
        }
        complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
            ETag:       part.ETag,
            PartNumber: part.PartNumber,
        })
    }

    // Sort all completed parts.
    sort.Sort(completedParts(complMultipartUpload.Parts))
    if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
        return totalUploadedSize, err
    }

    // Return final size.
    return totalUploadedSize, nil
}

// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return initiateMultipartUploadResult{}, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return initiateMultipartUploadResult{}, err
    }

    // Initialize url queries.
    urlValues := make(url.Values)
    urlValues.Set("uploads", "")

    // Set ContentType header.
    customHeader := make(http.Header)
    for k, v := range metadata {
        if len(v) > 0 {
            customHeader.Set(k, v[0])
        }
    }

    // Set a default content-type header if the latter is not provided.
    if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
        customHeader.Set("Content-Type", "application/octet-stream")
    }

    reqMetadata := requestMetadata{
        bucketName:   bucketName,
        objectName:   objectName,
        queryValues:  urlValues,
        customHeader: customHeader,
    }

    // Execute POST on an objectName to initiate multipart upload.
    resp, err := c.executeMethod("POST", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return initiateMultipartUploadResult{}, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
        }
    }
    // Decode xml for new multipart upload.
    initiateMultipartUploadResult := initiateMultipartUploadResult{}
    err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
    if err != nil {
        return initiateMultipartUploadResult, err
    }
    return initiateMultipartUploadResult, nil
}

const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"

// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
    partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return ObjectPart{}, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return ObjectPart{}, err
    }
    if size > maxPartSize {
        return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
    }
    if size <= -1 {
        return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
    }
    if partNumber <= 0 {
        return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
    }
    if uploadID == "" {
        return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
    }

    // Get resources properly escaped and lined up before using them in http request.
    urlValues := make(url.Values)
    // Set part number.
    urlValues.Set("partNumber", strconv.Itoa(partNumber))
    // Set upload id.
    urlValues.Set("uploadId", uploadID)

    // Set encryption headers, if any.
    customHeader := make(http.Header)
    for k, v := range metadata {
        if len(v) > 0 {
            if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
                customHeader.Set(k, v[0])
            }
        }
    }

    reqMetadata := requestMetadata{
        bucketName:         bucketName,
        objectName:         objectName,
        queryValues:        urlValues,
        customHeader:       customHeader,
        contentBody:        reader,
        contentLength:      size,
        contentMD5Bytes:    md5Sum,
        contentSHA256Bytes: sha256Sum,
    }

    // Execute PUT on each part.
    resp, err := c.executeMethod("PUT", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return ObjectPart{}, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
        }
    }
    // Once successfully uploaded, return completed part.
    objPart := ObjectPart{}
    objPart.Size = size
    objPart.PartNumber = partNumber
    // Trim off the odd double quotes from ETag in the beginning and end.
    objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
    objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
    return objPart, nil
}

// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
    complete completeMultipartUpload) (completeMultipartUploadResult, error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return completeMultipartUploadResult{}, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return completeMultipartUploadResult{}, err
    }

    // Initialize url queries.
    urlValues := make(url.Values)
    urlValues.Set("uploadId", uploadID)

    // Marshal complete multipart body.
    completeMultipartUploadBytes, err := xml.Marshal(complete)
    if err != nil {
        return completeMultipartUploadResult{}, err
    }

    // Instantiate the complete multipart buffer.
    completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
    reqMetadata := requestMetadata{
        bucketName:         bucketName,
        objectName:         objectName,
        queryValues:        urlValues,
        contentBody:        completeMultipartUploadBuffer,
        contentLength:      int64(len(completeMultipartUploadBytes)),
        contentSHA256Bytes: sum256(completeMultipartUploadBytes),
    }

    // Execute POST to complete multipart upload for an objectName.
    resp, err := c.executeMethod("POST", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return completeMultipartUploadResult{}, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
        }
    }

    // Read resp.Body into a []byte to parse for an Error response inside the body.
    var b []byte
    b, err = ioutil.ReadAll(resp.Body)
    if err != nil {
        return completeMultipartUploadResult{}, err
    }
    // Decode completed multipart upload response on success.
    completeMultipartUploadResult := completeMultipartUploadResult{}
    err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
    if err != nil {
        // xml parsing failure due to the presence of an ill-formed xml fragment.
        return completeMultipartUploadResult, err
    } else if completeMultipartUploadResult.Bucket == "" {
        // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
        // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
        // of the members.

        // Decode completed multipart upload response on failure.
        completeMultipartUploadErr := ErrorResponse{}
        err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
        if err != nil {
            // xml parsing failure due to the presence of an ill-formed xml fragment.
            return completeMultipartUploadResult, err
        }
        return completeMultipartUploadResult, completeMultipartUploadErr
    }
    return completeMultipartUploadResult, nil
}
436
vendor/github.com/minio/minio-go/api-put-object-streaming.go
generated
vendored
Normal file
@@ -0,0 +1,436 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "fmt"
    "io"
    "net/http"
    "sort"
    "strings"

    "github.com/minio/minio-go/pkg/s3utils"
)

// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
    return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
}

// putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads.
//
// The following code handles these types of readers:
//
//  - *os.File
//  - *minio.Object
//  - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipartStream(bucketName, objectName string,
    reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {

    // Verify if reader is *minio.Object, *os.File or io.ReaderAt.
    // NOTE: Verification of the object is kept even though it is
    // going to be duck typed similar to io.ReaderAt; it indicates
    // that *minio.Object implements io.ReaderAt, and that
    // functionality is used in the subsequent code path.
    if isFile(reader) || !isObject(reader) && isReadAt(reader) {
        n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
    } else {
        n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
    }
    if err != nil {
        errResp := ToErrorResponse(err)
        // If multipart functionality is not available, fall back
        // to a single PutObject operation.
        if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
            // Verify if size of reader is greater than '5GiB'.
            if size > maxSinglePutObjectSize {
                return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
            }
            // Fall back to uploading as single PutObject operation.
            return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
        }
    }
    return n, err
}

// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
    Error   error // Any error encountered while uploading the part.
    PartNum int   // Number of the part uploaded.
    Size    int64 // Size of the part uploaded.
    Part    *ObjectPart
}

type uploadPartReq struct {
    PartNum int         // Number of the part to upload.
    Part    *ObjectPart // Part metadata, filled in once uploaded.
}

// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
// Supports all readers which implement the io.ReaderAt interface
// (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt, which allows us to resume multipart
// uploads by reading at an offset, avoiding re-reading data which
// was already uploaded. Internally this function uses temporary
// files for staging all the data; these temporary files are cleaned
// up automatically when the caller (i.e. the http client) closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
    reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    // Input validation.
    if err = s3utils.CheckValidBucketName(bucketName); err != nil {
        return 0, err
    }
    if err = s3utils.CheckValidObjectName(objectName); err != nil {
        return 0, err
    }

    // Calculate the optimal parts info for a given size.
    totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
    if err != nil {
        return 0, err
    }

    // Initiate a new multipart upload.
    uploadID, err := c.newUploadID(bucketName, objectName, metadata)
    if err != nil {
        return 0, err
    }

    // Aborts the multipart upload in progress if the function
    // returns any error. Since we do not resume, we should purge
    // the parts which have been uploaded to relinquish storage
    // space.
    defer func() {
        if err != nil {
            c.abortMultipartUpload(bucketName, objectName, uploadID)
        }
    }()

    // Total data read and written to server; should be equal to 'size' at the end of the call.
    var totalUploadedSize int64

    // Complete multipart upload.
    var complMultipartUpload completeMultipartUpload

    // Declare a channel that sends the next part number to be uploaded.
    // Buffered to 10000 because that's the maximum number of parts allowed
    // by S3.
    uploadPartsCh := make(chan uploadPartReq, 10000)

    // Declare a channel that sends back the response of a part upload.
    // Buffered to 10000 because that's the maximum number of parts allowed
    // by S3.
    uploadedPartsCh := make(chan uploadedPartRes, 10000)

    // Used for readability, lastPartNumber is always totalPartsCount.
    lastPartNumber := totalPartsCount

    // Send each part number to the channel to be processed.
    for p := 1; p <= totalPartsCount; p++ {
        uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
    }
    close(uploadPartsCh)

    // Receive each part number from the channel, allowing totalWorkers parallel uploads.
    for w := 1; w <= totalWorkers; w++ {
        go func() {
            // Each worker will draw from the part channel and upload in parallel.
            for uploadReq := range uploadPartsCh {

                // If partNumber was not uploaded we calculate the missing
                // part offset and size. For all other part numbers we
                // calculate offset based on multiples of partSize.
                readOffset := int64(uploadReq.PartNum-1) * partSize

                // As a special case if partNumber is lastPartNumber, we
                // calculate the offset based on the last part size.
                if uploadReq.PartNum == lastPartNumber {
                    readOffset = (size - lastPartSize)
                    partSize = lastPartSize
                }

                // Get a section reader on a particular offset.
                sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress)

                // Proceed to upload the part.
                var objPart ObjectPart
                objPart, err = c.uploadPart(bucketName, objectName, uploadID,
                    sectionReader, uploadReq.PartNum,
                    nil, nil, partSize, metadata)
                if err != nil {
                    uploadedPartsCh <- uploadedPartRes{
                        Size:  0,
                        Error: err,
                    }
                    // Exit the goroutine.
                    return
                }

                // Save successfully uploaded part metadata.
                uploadReq.Part = &objPart

                // Send successful part info through the channel.
                uploadedPartsCh <- uploadedPartRes{
                    Size:    objPart.Size,
                    PartNum: uploadReq.PartNum,
                    Part:    uploadReq.Part,
                    Error:   nil,
                }
            }
        }()
    }

    // Gather the responses as they occur and update any
    // progress bar.
    for u := 1; u <= totalPartsCount; u++ {
        uploadRes := <-uploadedPartsCh
        if uploadRes.Error != nil {
            return totalUploadedSize, uploadRes.Error
        }
        // Retrieve each uploaded part and store it to be completed.
        part := uploadRes.Part
        if part == nil {
            return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
        }
        // Update the totalUploadedSize.
        totalUploadedSize += uploadRes.Size
        // Store the parts to be completed in order.
        complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
            ETag:       part.ETag,
            PartNumber: part.PartNumber,
        })
    }

    // Verify if we uploaded all the data.
    if totalUploadedSize != size {
        return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
    }

    // Sort all completed parts.
    sort.Sort(completedParts(complMultipartUpload.Parts))
    _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
    if err != nil {
        return totalUploadedSize, err
    }

    // Return final size.
    return totalUploadedSize, nil
}
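
// Editor's note (not part of the library): the function above is a bounded
// worker pool. uploadPartsCh is pre-filled with every part number and then
// closed, totalWorkers goroutines drain it, and the gather loop waits for
// exactly totalPartsCount results on uploadedPartsCh, so no sync.WaitGroup
// is needed to know when all parts are done.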

func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
    reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    // Input validation.
    if err = s3utils.CheckValidBucketName(bucketName); err != nil {
        return 0, err
    }
    if err = s3utils.CheckValidObjectName(objectName); err != nil {
        return 0, err
    }

    // Calculate the optimal parts info for a given size.
    totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
    if err != nil {
        return 0, err
    }

    // Initiates a new multipart request.
    uploadID, err := c.newUploadID(bucketName, objectName, metadata)
    if err != nil {
        return 0, err
    }

    // Aborts the multipart upload if the function returns
    // any error, since we do not resume we should purge
    // the parts which have been uploaded to relinquish
    // storage space.
    defer func() {
        if err != nil {
            c.abortMultipartUpload(bucketName, objectName, uploadID)
        }
    }()

    // Total data read and written to server; should be equal to 'size' at the end of the call.
    var totalUploadedSize int64

    // Initialize parts uploaded map.
    partsInfo := make(map[int]ObjectPart)

    // Part number always starts with '1'.
    var partNumber int
    for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
        // Update progress reader appropriately to the latest offset
        // as we read from the source.
        hookReader := newHook(reader, progress)

        // Proceed to upload the part.
        if partNumber == totalPartsCount {
            partSize = lastPartSize
        }

        var objPart ObjectPart
        objPart, err = c.uploadPart(bucketName, objectName, uploadID,
            io.LimitReader(hookReader, partSize),
            partNumber, nil, nil, partSize, metadata)
        if err != nil {
            return totalUploadedSize, err
        }

        // Save successfully uploaded part metadata.
        partsInfo[partNumber] = objPart

        // Save successfully uploaded size.
        totalUploadedSize += partSize
    }

    // Verify if we uploaded all the data.
    if size > 0 {
        if totalUploadedSize != size {
            return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
        }
    }

    // Complete multipart upload.
    var complMultipartUpload completeMultipartUpload

    // Loop over total uploaded parts to save them in
    // Parts array before completing the multipart request.
    for i := 1; i < partNumber; i++ {
        part, ok := partsInfo[i]
        if !ok {
            return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
        }
        complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
            ETag:       part.ETag,
            PartNumber: part.PartNumber,
        })
    }

    // Sort all completed parts.
    sort.Sort(completedParts(complMultipartUpload.Parts))
    _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
    if err != nil {
        return totalUploadedSize, err
    }

    // Return final size.
    return totalUploadedSize, nil
}

// putObjectNoChecksum - special function used for Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return 0, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return 0, err
    }

    // Size -1 is only supported on Google Cloud Storage, we error
    // out in all other situations.
    if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
        return 0, ErrEntityTooSmall(size, bucketName, objectName)
    }
    if size > 0 {
        if isReadAt(reader) && !isObject(reader) {
            reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
        }
    }

    // Update progress reader appropriately to the latest offset as we
    // read from the source.
    readSeeker := newHook(reader, progress)

    // This function does not calculate sha256 and md5sum for payload.
    // Execute put object.
    st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
    if err != nil {
        return 0, err
    }
    if st.Size != size {
        return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
    }
    return size, nil
}

// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return ObjectInfo{}, err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return ObjectInfo{}, err
    }

    // Set headers.
    customHeader := make(http.Header)

    // Set metadata to headers.
    for k, v := range metaData {
        if len(v) > 0 {
            customHeader.Set(k, v[0])
        }
    }

    // If Content-Type is not provided, set the default "application/octet-stream".
    if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
        customHeader.Set("Content-Type", "application/octet-stream")
    }

    // Populate request metadata.
    reqMetadata := requestMetadata{
        bucketName:         bucketName,
        objectName:         objectName,
        customHeader:       customHeader,
        contentBody:        reader,
        contentLength:      size,
        contentMD5Bytes:    md5Sum,
        contentSHA256Bytes: sha256Sum,
    }

    // Execute PUT on objectName.
    resp, err := c.executeMethod("PUT", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return ObjectInfo{}, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
        }
    }

    var objInfo ObjectInfo
    // Trim off the odd double quotes from ETag in the beginning and end.
    objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
    objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
    // A success here means data was written to server successfully.
    objInfo.Size = size

    // Return here.
    return objInfo, nil
}
218
vendor/github.com/minio/minio-go/api-put-object.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "io"
    "os"
    "reflect"
    "runtime"
    "strings"

    "github.com/minio/minio-go/pkg/credentials"
    "github.com/minio/minio-go/pkg/s3utils"
)

// toInt - converts a go value to its integer representation based
// on the value kind, if it is an integer.
func toInt(value reflect.Value) (size int64) {
    size = -1
    if value.IsValid() {
        switch value.Kind() {
        case reflect.Int:
            fallthrough
        case reflect.Int8:
            fallthrough
        case reflect.Int16:
            fallthrough
        case reflect.Int32:
            fallthrough
        case reflect.Int64:
            size = value.Int()
        }
    }
    return size
}

// getReaderSize - Determine the size of Reader if available.
func getReaderSize(reader io.Reader) (size int64, err error) {
    size = -1
    if reader == nil {
        return -1, nil
    }
    // Verify if there is a method by name 'Size'.
    sizeFn := reflect.ValueOf(reader).MethodByName("Size")
    // Verify if there is a method by name 'Len'.
    lenFn := reflect.ValueOf(reader).MethodByName("Len")
    if sizeFn.IsValid() {
        if sizeFn.Kind() == reflect.Func {
            // Call the 'Size' function and save its return value.
            result := sizeFn.Call([]reflect.Value{})
            if len(result) == 1 {
                size = toInt(result[0])
            }
        }
    } else if lenFn.IsValid() {
        if lenFn.Kind() == reflect.Func {
            // Call the 'Len' function and save its return value.
            result := lenFn.Call([]reflect.Value{})
            if len(result) == 1 {
                size = toInt(result[0])
            }
        }
    } else {
        // Fall back to the Stat() method; two possible Stat() structs exist.
        switch v := reader.(type) {
        case *os.File:
            var st os.FileInfo
            st, err = v.Stat()
            if err != nil {
                // Handle this case specially for "windows": for
                // certain files, for example 'Stdin', 'Stdout' and
                // 'Stderr', it is not allowed to fetch file information.
                if runtime.GOOS == "windows" {
                    if strings.Contains(err.Error(), "GetFileInformationByHandle") {
                        return -1, nil
                    }
                }
                return
            }
            // If the input is a directory, return an error.
            if st.Mode().IsDir() {
                return -1, ErrInvalidArgument("Input file cannot be a directory.")
            }
            // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
            // are of the *os.File type but internally do not
            // implement seekable calls. Ignore them and treat
            // them like a stream with unknown length.
            switch st.Name() {
            case "stdin", "stdout", "stderr":
                return
            // Ignore read/write streams of os.Pipe(), which have unknown length too.
            case "|0", "|1":
                return
            }
            var pos int64
            pos, err = v.Seek(0, 1) // SeekCurrent.
            if err != nil {
                return -1, err
            }
            size = st.Size() - pos
        case *Object:
            var st ObjectInfo
            st, err = v.Stat()
            if err != nil {
                return
            }
            var pos int64
            pos, err = v.Seek(0, 1) // SeekCurrent.
            if err != nil {
                return -1, err
            }
            size = st.Size - pos
        }
    }
    // Returns the size here.
    return size, err
}
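
// Editor's note (not part of the library): the reflection above means any
// reader exposing a Size() or Len() method — e.g. *bytes.Reader,
// *bytes.Buffer, *strings.Reader — reports its size without being read,
// while *os.File and *minio.Object fall back to Stat() minus the current
// seek offset.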

// completedParts is a collection of parts sortable by their part numbers.
// Used for sorting the uploaded parts before completing the multipart request.
type completedParts []CompletePart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// PutObject creates an object in a bucket.
//
// You must have WRITE permissions on a bucket to create an object.
//
//  - For size smaller than 64MiB PutObject automatically does a
//    single atomic Put operation.
//  - For size larger than 64MiB PutObject automatically does a
//    multipart Put operation.
//  - For size input as -1 PutObject does a multipart Put operation
//    until input stream reaches EOF. Maximum object size that can
//    be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
    return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
        "Content-Type": {contentType},
    }, nil)
}
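
// Example usage (editor's sketch, not part of this file):
//
//	f, err := os.Open("/tmp/data.bin")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	n, err := client.PutObject("my-bucket", "data.bin", f, "application/octet-stream")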

// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject()
// but takes the size argument explicitly. This function avoids doing reflection
// internally to figure out the size of the input stream. Also, if the input size is
// less than 0, this function returns an error.
func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
}

// PutObjectWithMetadata using AWS streaming signature V4
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
}

// PutObjectWithProgress using AWS streaming signature V4
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    // Size of the object.
    var size int64

    // Get reader size.
    size, err = getReaderSize(reader)
    if err != nil {
        return 0, err
    }
    return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
}

func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
    // Check for largest object size allowed.
    if size > int64(maxMultipartPutObjectSize) {
        return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
    }

    // NOTE: Streaming signature is not supported by GCS.
    if s3utils.IsGoogleEndpoint(c.endpointURL) {
        // Do not compute MD5 for Google Cloud Storage.
        return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
    }

    if c.overrideSignerType.IsV2() {
        if size > 0 && size < minPartSize {
            return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
        }
        return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
    }

    // If size cannot be found on a stream, it is not possible
    // to upload using streaming signature, fall back to multipart.
    if size < 0 {
        return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
    }

    // Set streaming signature.
    c.overrideSignerType = credentials.SignatureV4Streaming

    if size < minPartSize {
        return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
    }

    // For all sizes greater than 64MiB do multipart.
    return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
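
// Editor's summary of the dispatch above (not part of the library):
//   - Google Cloud Storage endpoints always take the single-shot
//     putObjectNoChecksum path (no S3-compatible multipart, no streaming).
//   - V2 signatures: single PUT for known sizes below minPartSize (64MiB),
//     plain multipart otherwise.
//   - V4 signatures: an unknown size (-1) falls back to plain multipart;
//     otherwise streaming signatures are enabled, with a single PUT below
//     64MiB and streaming multipart above.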
288
vendor/github.com/minio/minio-go/api-remove.go
generated
vendored
Normal file
@@ -0,0 +1,288 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "bytes"
    "encoding/xml"
    "io"
    "net/http"
    "net/url"

    "github.com/minio/minio-go/pkg/s3utils"
)

// RemoveBucket deletes the bucket name.
//
// All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return err
    }
    // Execute DELETE on bucket.
    resp, err := c.executeMethod("DELETE", requestMetadata{
        bucketName:         bucketName,
        contentSHA256Bytes: emptySHA256,
    })
    defer closeResponse(resp)
    if err != nil {
        return err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusNoContent {
            return httpRespToErrorResponse(resp, bucketName, "")
        }
    }

    // Remove the location from cache on a successful delete.
    c.bucketLocCache.Delete(bucketName)

    return nil
}

// RemoveObject removes an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return err
    }
    // Execute DELETE on objectName.
    resp, err := c.executeMethod("DELETE", requestMetadata{
        bucketName:         bucketName,
        objectName:         objectName,
        contentSHA256Bytes: emptySHA256,
    })
    defer closeResponse(resp)
    if err != nil {
        return err
    }
    if resp != nil {
        // If some unexpected error happened and max retry is reached, we want to let the client know.
        if resp.StatusCode != http.StatusNoContent {
            return httpRespToErrorResponse(resp, bucketName, objectName)
        }
    }

    // DeleteObject always responds with http '204' even for
    // objects which do not exist. So no need to handle them
    // specifically.
    return nil
}

// RemoveObjectError - container of Multi Delete S3 API error.
type RemoveObjectError struct {
    ObjectName string
    Err        error
}

// generateRemoveMultiObjectsRequest - generate the XML request for a remove multi objects request.
func generateRemoveMultiObjectsRequest(objects []string) []byte {
    rmObjects := []deleteObject{}
    for _, obj := range objects {
        rmObjects = append(rmObjects, deleteObject{Key: obj})
    }
    xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
    return xmlBytes
}

// processRemoveMultiObjectsResponse - parse the remove multi objects web service
// response and return the success/failure result status for each object.
func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
    // Parse multi delete XML response.
    rmResult := &deleteMultiObjectsResult{}
    err := xmlDecoder(body, rmResult)
    if err != nil {
        errorCh <- RemoveObjectError{ObjectName: "", Err: err}
        return
    }

    // Fill deletions that returned an error.
    for _, obj := range rmResult.UnDeletedObjects {
        errorCh <- RemoveObjectError{
            ObjectName: obj.Key,
            Err: ErrorResponse{
                Code:    obj.Code,
                Message: obj.Message,
            },
        }
    }
}

// RemoveObjects removes multiple objects from a bucket.
// The list of objects to remove is received from objectsCh.
// Remove failures are sent back via the error channel.
func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
    errorCh := make(chan RemoveObjectError, 1)

    // Validate if bucket name is valid.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        defer close(errorCh)
        errorCh <- RemoveObjectError{
            Err: err,
        }
        return errorCh
    }
    // Validate objects channel to be properly allocated.
    if objectsCh == nil {
        defer close(errorCh)
        errorCh <- RemoveObjectError{
            Err: ErrInvalidArgument("Objects channel cannot be nil"),
        }
        return errorCh
    }

    // Generate and call MultiDelete S3 requests based on entries received from objectsCh.
    go func(errorCh chan<- RemoveObjectError) {
        maxEntries := 1000
        finish := false
        urlValues := make(url.Values)
        urlValues.Set("delete", "")

        // Close error channel when Multi delete finishes.
        defer close(errorCh)

        // Loop over entries by 1000 and call MultiDelete requests.
        for {
            if finish {
                break
            }
            count := 0
            var batch []string

            // Try to gather 1000 entries.
            for object := range objectsCh {
                batch = append(batch, object)
                if count++; count >= maxEntries {
                    break
                }
            }
            if count == 0 {
                // Multi Objects Delete API doesn't accept an empty object list, quit immediately.
                break
            }
            if count < maxEntries {
                // We didn't have 1000 entries, so this is the last batch.
                finish = true
            }

            // Generate remove multi objects XML request.
            removeBytes := generateRemoveMultiObjectsRequest(batch)
            // Execute POST on bucket to perform the multi-object delete.
            resp, err := c.executeMethod("POST", requestMetadata{
                bucketName:         bucketName,
                queryValues:        urlValues,
                contentBody:        bytes.NewReader(removeBytes),
                contentLength:      int64(len(removeBytes)),
                contentMD5Bytes:    sumMD5(removeBytes),
                contentSHA256Bytes: sum256(removeBytes),
            })
            if err != nil {
                for _, b := range batch {
                    errorCh <- RemoveObjectError{ObjectName: b, Err: err}
                }
                continue
            }

            // Process multi objects remove xml response.
            processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)

            closeResponse(resp)
        }
    }(errorCh)
    return errorCh
}
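
// Example usage (editor's sketch, not part of this file):
//
//	objectsCh := make(chan string)
//	go func() {
//		defer close(objectsCh)
//		objectsCh <- "obj-1"
//		objectsCh <- "obj-2"
//	}()
//	for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
//		log.Println("failed to remove", rErr.ObjectName, rErr.Err)
//	}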

// RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return err
    }
    // Find the multipart upload id of the object to be aborted.
    uploadID, err := c.findUploadID(bucketName, objectName)
    if err != nil {
        return err
    }
    if uploadID != "" {
        // Upload id found, abort the incomplete multipart upload.
        err := c.abortMultipartUpload(bucketName, objectName, uploadID)
        if err != nil {
            return err
        }
    }
    return nil
}

// abortMultipartUpload aborts a multipart upload for the given
// uploadID; all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
    // Input validation.
    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
        return err
    }
    if err := s3utils.CheckValidObjectName(objectName); err != nil {
        return err
    }

    // Initialize url queries.
    urlValues := make(url.Values)
    urlValues.Set("uploadId", uploadID)

    // Execute DELETE on multipart upload.
    resp, err := c.executeMethod("DELETE", requestMetadata{
        bucketName:         bucketName,
        objectName:         objectName,
        queryValues:        urlValues,
        contentSHA256Bytes: emptySHA256,
    })
    defer closeResponse(resp)
    if err != nil {
        return err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusNoContent {
            // Abort has no response body, handle it for any errors.
            var errorResponse ErrorResponse
            switch resp.StatusCode {
            case http.StatusNotFound:
                // This is needed specifically for abort and it cannot
                // be converged into the default case.
                errorResponse = ErrorResponse{
                    Code:       "NoSuchUpload",
                    Message:    "The specified multipart upload does not exist.",
                    BucketName: bucketName,
                    Key:        objectName,
                    RequestID:  resp.Header.Get("x-amz-request-id"),
                    HostID:     resp.Header.Get("x-amz-id-2"),
                    Region:     resp.Header.Get("x-amz-bucket-region"),
                }
            default:
                return httpRespToErrorResponse(resp, bucketName, objectName)
            }
            return errorResponse
        }
    }
    return nil
}
244
vendor/github.com/minio/minio-go/api-s3-datatypes.go
generated
vendored
Normal file
@@ -0,0 +1,244 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"encoding/xml"
	"time"
)

// listAllMyBucketsResult container for listBuckets response.
type listAllMyBucketsResult struct {
	// Container for one or more buckets.
	Buckets struct {
		Bucket []BucketInfo
	}
	Owner owner
}

// owner container for bucket owner information.
type owner struct {
	DisplayName string
	ID          string
}

// CommonPrefix container for prefix response.
type CommonPrefix struct {
	Prefix string
}

// ListBucketV2Result container for listObjects response version 2.
type ListBucketV2Result struct {
	// A response can contain CommonPrefixes only if you have
	// specified a delimiter.
	CommonPrefixes []CommonPrefix
	// Metadata about each object returned.
	Contents  []ObjectInfo
	Delimiter string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	MaxKeys     int64
	Name        string

	// Hold the token that will be sent in the next request to fetch the next group of keys
	NextContinuationToken string

	ContinuationToken string
	Prefix            string

	// FetchOwner and StartAfter are currently not used
	FetchOwner string
	StartAfter string
}

// ListBucketResult container for listObjects response.
type ListBucketResult struct {
	// A response can contain CommonPrefixes only if you have
	// specified a delimiter.
	CommonPrefixes []CommonPrefix
	// Metadata about each object returned.
	Contents  []ObjectInfo
	Delimiter string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	Marker      string
	MaxKeys     int64
	Name        string

	// When the response is truncated (the IsTruncated element value in
	// the response is true), you can use the key name in this field
	// as a marker in the subsequent request to get the next set of objects.
	// Object storage lists objects in alphabetical order. Note: this
	// element is returned only if you have the delimiter request
	// parameter specified. If the response does not include NextMarker
	// and it is truncated, you can use the value of the last Key in
	// the response as the marker in the subsequent request to get the
	// next set of object keys.
	NextMarker string
	Prefix     string
}

// ListMultipartUploadsResult container for ListMultipartUploads response
type ListMultipartUploadsResult struct {
	Bucket             string
	KeyMarker          string
	UploadIDMarker     string `xml:"UploadIdMarker"`
	NextKeyMarker      string
	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
	EncodingType       string
	MaxUploads         int64
	IsTruncated        bool
	Uploads            []ObjectMultipartInfo `xml:"Upload"`
	Prefix             string
	Delimiter          string
	// A response can contain CommonPrefixes only if you specify a delimiter.
	CommonPrefixes []CommonPrefix
}

// initiator container for who initiated multipart upload.
type initiator struct {
	ID          string
	DisplayName string
}

// copyObjectResult container for copy object response.
type copyObjectResult struct {
	ETag         string
	LastModified string // time string format "2006-01-02T15:04:05.000Z"
}

// ObjectPart container for particular part of an object.
type ObjectPart struct {
	// Part number identifies the part.
	PartNumber int

	// Date and time the part was uploaded.
	LastModified time.Time

	// Entity tag returned when the part was uploaded, usually md5sum
	// of the part.
	ETag string

	// Size of the uploaded part data.
	Size int64
}

// ListObjectPartsResult container for ListObjectParts response.
type ListObjectPartsResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`

	Initiator initiator
	Owner     owner

	StorageClass         string
	PartNumberMarker     int
	NextPartNumberMarker int
	MaxParts             int

	// Indicates whether the returned list of parts is truncated.
	IsTruncated bool
	ObjectParts []ObjectPart `xml:"Part"`

	EncodingType string
}

// initiateMultipartUploadResult container for InitiateMultiPartUpload
// response.
type initiateMultipartUploadResult struct {
	Bucket   string
	Key      string
	UploadID string `xml:"UploadId"`
}

// completeMultipartUploadResult container for completed multipart
// upload response.
type completeMultipartUploadResult struct {
	Location string
	Bucket   string
	Key      string
	ETag     string
}

// CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
type CompletePart struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`

	// Part number identifies the part.
	PartNumber int
	ETag       string
}

// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
	Parts   []CompletePart `xml:"Part"`
}
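Aside: the two types above define the exact request body the client sends when completing a multipart upload. A minimal, hypothetical sketch (written as if inside package minio; the part numbers and ETags are invented) that renders that XML for inspection:

package minio

import (
	"encoding/xml"
	"fmt"
)

// demoCompleteMultipartXML marshals a completeMultipartUpload value to
// show the wire format produced by the struct tags above.
func demoCompleteMultipartXML() {
	payload := completeMultipartUpload{
		Parts: []CompletePart{
			{PartNumber: 1, ETag: "\"9b2cf535f27731c974343645a3985328\""},
			{PartNumber: 2, ETag: "\"d41d8cd98f00b204e9800998ecf8427e\""},
		},
	}
	out, err := xml.MarshalIndent(payload, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints a <CompleteMultipartUpload xmlns="..."> document with one
	// <Part> element per entry.
	fmt.Println(string(out))
}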
// createBucketConfiguration container for bucket configuration.
type createBucketConfiguration struct {
	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
	Location string   `xml:"LocationConstraint"`
}

// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
	// These fields are ignored.
	DeleteMarker          bool
	DeleteMarkerVersionID string
}

// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
type nonDeletedObject struct {
	Key     string
	Code    string
	Message string
}

// deleteMultiObjects container for MultiObjects Delete XML request
type deleteMultiObjects struct {
	XMLName xml.Name       `xml:"Delete"`
	Quiet   bool
	Objects []deleteObject `xml:"Object"`
}

// deleteMultiObjectsResult container for MultiObjects Delete XML response
type deleteMultiObjectsResult struct {
	XMLName          xml.Name           `xml:"DeleteResult"`
	DeletedObjects   []deletedObject    `xml:"Deleted"`
	UnDeletedObjects []nonDeletedObject `xml:"Error"`
}
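Aside: a minimal sketch of how a DeleteResult response body maps onto deleteMultiObjectsResult via encoding/xml. The sample XML is invented for illustration; only the struct tags above are taken from the source.

package minio

import (
	"encoding/xml"
	"fmt"
)

// Hypothetical sample of a MultiObjects Delete response body.
const sampleDeleteResult = `<DeleteResult>
  <Deleted><Key>photos/a.jpg</Key></Deleted>
  <Error><Key>photos/b.jpg</Key><Code>AccessDenied</Code><Message>Access Denied</Message></Error>
</DeleteResult>`

func demoDecodeDeleteResult() {
	var res deleteMultiObjectsResult
	if err := xml.Unmarshal([]byte(sampleDeleteResult), &res); err != nil {
		panic(err)
	}
	// Prints: 1 deleted, 1 failed
	fmt.Println(len(res.DeletedObjects), "deleted,", len(res.UnDeletedObjects), "failed")
}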
184
vendor/github.com/minio/minio-go/api-stat.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio-go/pkg/s3utils"
)

// BucketExists verifies whether the bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) (bool, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return false, err
	}

	// Execute HEAD on bucketName.
	resp, err := c.executeMethod("HEAD", requestMetadata{
		bucketName:         bucketName,
		contentSHA256Bytes: emptySHA256,
	})
	defer closeResponse(resp)
	if err != nil {
		if ToErrorResponse(err).Code == "NoSuchBucket" {
			return false, nil
		}
		return false, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return false, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return true, nil
}
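Aside: a minimal usage sketch for BucketExists. The endpoint, credentials, and bucket name are placeholders, not values from this repository.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials -- substitute your own.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	found, err := client.BucketExists("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket exists:", found)
}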
// List of header keys to be filtered, usually
// from all S3 API http responses.
var defaultFilterKeys = []string{
	"Connection",
	"Transfer-Encoding",
	"Accept-Ranges",
	"Date",
	"Server",
	"Vary",
	"x-amz-bucket-region",
	"x-amz-request-id",
	"x-amz-id-2",
	// Add new headers to be ignored.
}

// Extract only necessary metadata header key/values by
// filtering them out with a list of custom header keys.
func extractObjMetadata(header http.Header) http.Header {
	filterKeys := append([]string{
		"ETag",
		"Content-Length",
		"Last-Modified",
		"Content-Type",
	}, defaultFilterKeys...)
	return filterHeader(header, filterKeys)
}

// StatObject verifies if the object exists and you have permission to access it.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return ObjectInfo{}, err
	}
	reqHeaders := NewHeadReqHeaders()
	return c.statObject(bucketName, objectName, reqHeaders)
}

// Lower level API for statObject supporting pre-conditions and range headers.
func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return ObjectInfo{}, err
	}

	customHeader := make(http.Header)
	for k, v := range reqHeaders.Header {
		customHeader[k] = v
	}

	// Execute HEAD on objectName.
	resp, err := c.executeMethod("HEAD", requestMetadata{
		bucketName:         bucketName,
		objectName:         objectName,
		contentSHA256Bytes: emptySHA256,
		customHeader:       customHeader,
	})
	defer closeResponse(resp)
	if err != nil {
		return ObjectInfo{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Trim off the odd double quotes from ETag in the beginning and end.
	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
	md5sum = strings.TrimSuffix(md5sum, "\"")

	// Parse content length if it exists.
	var size int64 = -1
	contentLengthStr := resp.Header.Get("Content-Length")
	if contentLengthStr != "" {
		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			// Content-Length is not valid.
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    "Content-Length is invalid. " + reportIssue,
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  resp.Header.Get("x-amz-request-id"),
				HostID:     resp.Header.Get("x-amz-id-2"),
				Region:     resp.Header.Get("x-amz-bucket-region"),
			}
		}
	}

	// Parse Last-Modified, which has http time format.
	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
	if err != nil {
		return ObjectInfo{}, ErrorResponse{
			Code:       "InternalError",
			Message:    "Last-Modified time format is invalid. " + reportIssue,
			BucketName: bucketName,
			Key:        objectName,
			RequestID:  resp.Header.Get("x-amz-request-id"),
			HostID:     resp.Header.Get("x-amz-id-2"),
			Region:     resp.Header.Get("x-amz-bucket-region"),
		}
	}

	// Fetch content type if any present.
	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	// Extract only the relevant header keys describing the object.
	// The following function filters out a standard set of keys
	// which are not part of object metadata.
	metadata := extractObjMetadata(resp.Header)

	// Save object metadata info.
	return ObjectInfo{
		ETag:         md5sum,
		Key:          objectName,
		Size:         size,
		LastModified: date,
		ContentType:  contentType,
		Metadata:     metadata,
	}, nil
}
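Aside: a minimal usage sketch for StatObject, showing the ObjectInfo fields that statObject fills in above. Endpoint, credentials, bucket, and object names are placeholders.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials -- substitute your own.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	info, err := client.StatObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("%s: %d bytes, %s, modified %s",
		info.Key, info.Size, info.ContentType, info.LastModified)
}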
801
vendor/github.com/minio/minio-go/api.go
generated
vendored
Normal file
@@ -0,0 +1,801 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"os"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio-go/pkg/credentials"
	"github.com/minio/minio-go/pkg/s3signer"
	"github.com/minio/minio-go/pkg/s3utils"
)

// Client implements Amazon S3 compatible methods.
type Client struct {
	/// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied.
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	bucketLocCache *bucketLocationCache

	// Advanced functionality.
	isTraceEnabled bool
	traceOutput    io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand
}

// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "3.0.0"
)

// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
//	Minio (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)

// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
	clnt, err := privateNew(endpoint, creds, secure, "")
	if err != nil {
		return nil, err
	}
	clnt.overrideSignerType = credentials.SignatureV2
	return clnt, nil
}

// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
	clnt, err := privateNew(endpoint, creds, secure, "")
	if err != nil {
		return nil, err
	}
	clnt.overrideSignerType = credentials.SignatureV4
	return clnt, nil
}

// New - instantiate minio client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
	clnt, err := privateNew(endpoint, creds, secure, "")
	if err != nil {
		return nil, err
	}
	// Google cloud storage should be set to signature V2, force it if not.
	if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
		clnt.overrideSignerType = credentials.SignatureV2
	}
	// If Amazon S3 set to signature v4.
	if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
		clnt.overrideSignerType = credentials.SignatureV4
	}
	return clnt, nil
}

// NewWithCredentials - instantiate minio client with a credentials provider
// for retrieving credentials from various credentials providers such as
// IAM, File, Env etc.
func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
	return privateNew(endpoint, creds, secure, region)
}

// NewWithRegion - instantiate minio client with region configured. Unlike New(),
// NewWithRegion avoids bucket-location lookup operations and is slightly faster.
// Use this function when your application deals with a single region.
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
	return privateNew(endpoint, creds, secure, region)
}
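Aside: a minimal sketch showing two of the constructors above in use. All key values are placeholders; the region choice is an assumption for illustration.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Simple constructor with static keys (placeholder values).
	s3, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	_ = s3

	// Constructor with an explicit credentials provider and a fixed
	// region, which skips the bucket-location lookup entirely.
	creds := credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", "")
	s3East, err := minio.NewWithCredentials("s3.amazonaws.com", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}
	_ = s3East
}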
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
	lk  sync.Mutex
	src rand.Source
}

// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
	r.lk.Lock()
	n = r.src.Int63()
	r.lk.Unlock()
	return
}

// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
	r.lk.Lock()
	r.src.Seed(seed)
	r.lk.Unlock()
}

// redirectHeaders copies all headers when following a redirect URL.
// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
func redirectHeaders(req *http.Request, via []*http.Request) error {
	if len(via) == 0 {
		return nil
	}
	for key, val := range via[0].Header {
		req.Header[key] = val
	}
	return nil
}

func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, secure)
	if err != nil {
		return nil, err
	}

	// instantiate new Client.
	clnt := new(Client)

	// Save the credentials.
	clnt.credsProvider = creds

	// Remember whether we are using https or not
	clnt.secure = secure

	// Save endpoint URL, user agent for future uses.
	clnt.endpointURL = *endpointURL

	// Instantiate http client and bucket location cache.
	clnt.httpClient = &http.Client{
		Transport:     http.DefaultTransport,
		CheckRedirect: redirectHeaders,
	}

	// Sets custom region, if region is empty bucket location cache is used automatically.
	clnt.region = region

	// Instantiate bucket location cache.
	clnt.bucketLocCache = newBucketLocationCache()

	// Introduce a new locked random seed.
	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})

	// Return.
	return clnt, nil
}

// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
	// If app name and version are not set, we do not set a new user agent.
	if appName != "" && appVersion != "" {
		c.appInfo = struct {
			appName    string
			appVersion string
		}{}
		c.appInfo.appName = appName
		c.appInfo.appVersion = appVersion
	}
}

// SetCustomTransport - set new custom transport.
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
	// Set this to override the default transport
	// ``http.DefaultTransport``.
	//
	// This transport is usually needed for debugging OR to add your
	// own custom TLS certificates on the client transport, for custom
	// CA's and certs which are not part of a standard certificate
	// authority. Follow this example:
	//
	//   tr := &http.Transport{
	//           TLSClientConfig:    &tls.Config{RootCAs: pool},
	//           DisableCompression: true,
	//   }
	//   api.SetTransport(tr)
	//
	if c.httpClient != nil {
		c.httpClient.Transport = customHTTPTransport
	}
}

// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
	// If outputStream is nil then default to os.Stdout.
	if outputStream == nil {
		outputStream = os.Stdout
	}
	// Sets a new output stream.
	c.traceOutput = outputStream

	// Enable tracing.
	c.isTraceEnabled = true
}

// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
	// Disable tracing.
	c.isTraceEnabled = false
}
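Aside: a minimal tracing sketch using TraceOn/TraceOff above; every request and response header is dumped to the writer you supply. Endpoint and credentials are placeholders.

package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials -- substitute your own.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Dump request/response headers for every call to stderr.
	client.TraceOn(os.Stderr)
	defer client.TraceOff()

	// Any API call issued here is traced.
	_, _ = client.BucketExists("mybucket")
}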
// SetS3TransferAccelerate - turns the s3 accelerated endpoint on or off for all your
// requests. This feature is specific to S3; for all other endpoints this
// function does nothing. To read further details on s3 transfer acceleration
// please visit -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
	if s3utils.IsAmazonEndpoint(c.endpointURL) {
		c.s3AccelerateEndpoint = accelerateEndpoint
	}
}
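Aside: a one-line usage sketch. The accelerate hostname shown is the one documented by AWS at the link above, but treat it as an assumption and verify against your account's configuration.

// After constructing an S3 client (see the constructors above):
// client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")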
// hashMaterials provides relevant initialized hash algo writers
// based on the expected signature type.
//
// - For signature v4 request if the connection is insecure compute only sha256.
// - For signature v4 request if the connection is secure compute only md5.
// - For anonymous request compute md5.
func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
	hashSums = make(map[string][]byte)
	hashAlgos = make(map[string]hash.Hash)
	if c.overrideSignerType.IsV4() {
		if c.secure {
			hashAlgos["md5"] = md5.New()
		} else {
			hashAlgos["sha256"] = sha256.New()
		}
	} else {
		if c.overrideSignerType.IsAnonymous() {
			hashAlgos["md5"] = md5.New()
		}
	}
	return hashAlgos, hashSums
}

// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL.
	presignURL bool

	// User supplied.
	bucketName   string
	objectName   string
	queryValues  url.Values
	customHeader http.Header
	expires      int64

	// Generated by our internal code.
	bucketLocation     string
	contentBody        io.Reader
	contentLength      int64
	contentSHA256Bytes []byte
	contentMD5Bytes    []byte
}

// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
	// Starts http dump.
	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
	if err != nil {
		return err
	}

	// Filter out Signature field from Authorization header.
	origAuth := req.Header.Get("Authorization")
	if origAuth != "" {
		req.Header.Set("Authorization", redactSignature(origAuth))
	}

	// Only display request header.
	reqTrace, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		return err
	}

	// Write request to trace output.
	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
	if err != nil {
		return err
	}

	// Only display response header.
	var respTrace []byte

	// For errors we make sure to dump response body as well.
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusPartialContent &&
		resp.StatusCode != http.StatusNoContent {
		respTrace, err = httputil.DumpResponse(resp, true)
		if err != nil {
			return err
		}
	} else {
		// WORKAROUND for https://github.com/golang/go/issues/13942.
		// httputil.DumpResponse does not print response headers for
		// all successful calls which have response ContentLength set
		// to zero. Keep this workaround until the above bug is fixed.
		if resp.ContentLength == 0 {
			var buffer bytes.Buffer
			if err = resp.Header.Write(&buffer); err != nil {
				return err
			}
			respTrace = buffer.Bytes()
			respTrace = append(respTrace, []byte("\r\n")...)
		} else {
			respTrace, err = httputil.DumpResponse(resp, false)
			if err != nil {
				return err
			}
		}
	}
	// Write response to trace output.
	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
	if err != nil {
		return err
	}

	// Ends the http dump.
	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
	if err != nil {
		return err
	}

	// Returns success.
	return nil
}

// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	// Do the request in a loop in case an http 307 is returned, since golang
	// still doesn't handle this situation properly
	// (https://github.com/golang/go/issues/7912)
	for {
		resp, err = c.httpClient.Do(req)
		if err != nil {
			// Handle this specifically for now until future Golang
			// versions fix this issue properly.
			urlErr, ok := err.(*url.Error)
			if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
				return nil, &url.Error{
					Op:  urlErr.Op,
					URL: urlErr.URL,
					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
				}
			}
			return nil, err
		}
		// Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
		if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
			newURL, err := url.Parse(resp.Header.Get("Location"))
			if err != nil {
				break
			}
			req.URL = newURL
		} else {
			break
		}
	}

	// Response cannot be nil at this point, report if that is the case.
	if resp == nil {
		msg := "Response is empty. " + reportIssue
		return nil, ErrInvalidArgument(msg)
	}

	// If trace is enabled, dump http request and response.
	if c.isTraceEnabled {
		err = c.dumpHTTP(req, resp)
		if err != nil {
			return nil, err
		}
	}
	return resp, nil
}

// List of success status.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}

// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
	var isRetryable bool     // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
		switch bodySeeker {
		case os.Stdin, os.Stdout, os.Stderr:
			isRetryable = false
		}
		// Figure out if the body can be closed - if yes
		// we will definitely close it upon the function
		// return.
		bodyCloser, ok := metadata.contentBody.(io.Closer)
		if ok {
			defer bodyCloser.Close()
		}
	}

	// Create a done channel to control 'newRetryTimer' go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Blank identifier is kept here on purpose since 'range' without
	// blank identifiers is only supported since go1.4
	// https://golang.org/doc/go1.4#forrange.
	for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if isRetryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}
			return nil, err
		}

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			// For supported network errors verify.
			if isNetErrorRetryable(err) {
				continue // Retry.
			}
			// For other errors, return here; no need to retry.
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		errBodyBytes, err := ioutil.ReadAll(res.Body)
		// res.Body should be closed.
		closeResponse(res)
		if err != nil {
			return nil, err
		}

		// Save the body.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = ioutil.NopCloser(errBodySeeker)

		// For errors verify if it's retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = ioutil.NopCloser(errBodySeeker)

		// If the bucket region is set in the error response and the error
		// code indicates an invalid region, we can retry the request
		// with the new region.
		//
		// Additionally we should only retry if bucketLocation and custom
		// region is empty.
		if metadata.bucketLocation == "" && c.region == "" {
			if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
				c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
				continue // Retry.
			}
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// For all other cases break out of the retry loop.
		break
	}
	return res, err
}
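Aside: a simplified, self-contained sketch of the retry pattern executeMethod uses, i.e. rerun the operation on transient failure after an exponentially growing, jittered delay. This is a stand-in for illustration, not the library's actual newRetryTimer implementation; the parameter names are invented.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retry runs op up to maxRetries times, sleeping a jittered,
// exponentially growing delay (capped at maxDelay) between attempts.
func retry(maxRetries int, unit, maxDelay time.Duration, op func() error) error {
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		// Exponential backoff with full jitter, capped at maxDelay.
		backoff := unit * time.Duration(1<<uint(attempt))
		if backoff > maxDelay {
			backoff = maxDelay
		}
		time.Sleep(time.Duration(rand.Int63n(int64(backoff) + 1)))
	}
	return err
}

func main() {
	calls := 0
	err := retry(5, 100*time.Millisecond, 2*time.Second, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient error %d", calls)
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err)
}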
// newRequest - instantiate a new HTTP request for a given method.
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
	// If no method is supplied default to 'POST'.
	if method == "" {
		method = "POST"
	}

	location := metadata.bucketLocation
	if location == "" {
		if metadata.bucketName != "" {
			// Gather location only if bucketName is present.
			location, err = c.getBucketLocation(metadata.bucketName)
			if err != nil {
				if ToErrorResponse(err).Code != "AccessDenied" {
					return nil, err
				}
			}
			// Upon AccessDenied error on fetching bucket location, default
			// to possible locations based on endpoint URL. This can usually
			// happen when GetBucketLocation() is disabled using IAM policies.
		}
		if location == "" {
			location = getDefaultLocation(c.endpointURL, c.region)
		}
	}

	// Construct a new target URL.
	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
	if err != nil {
		return nil, err
	}

	// Initialize a new HTTP request for the method.
	req, err = http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// If a custom signer is set, override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Generate presign url if needed, return right here.
	if metadata.expires != 0 && metadata.presignURL {
		if signerType.IsAnonymous() {
			return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
		}
		if signerType.IsV2() {
			// Presign URL with signature v2.
			req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
		} else if signerType.IsV4() {
			// Presign URL with signature v4.
			req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
		}
		return req, nil
	}

	// Set 'User-Agent' header for the request.
	c.setUserAgent(req)

	// Set all headers.
	for k, v := range metadata.customHeader {
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = ioutil.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
		// For unknown content length, we upload using transfer-encoding: chunked.
		req.TransferEncoding = []string{"chunked"}
	}

	// Set md5Sum for content protection.
	if metadata.contentMD5Bytes != nil {
		req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
	}

	// For anonymous requests just return.
	if signerType.IsAnonymous() {
		return req, nil
	}

	switch {
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
	case signerType.IsStreamingV4() && method == "PUT":
		req = s3signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
	default:
		// Set sha256 sum for signature calculation only with signature version '4'.
		shaHeader := unsignedPayload
		if len(metadata.contentSHA256Bytes) > 0 {
			shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
		}
		req.Header.Set("X-Amz-Content-Sha256", shaHeader)

		// Add signature version '4' authorization header.
		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
	}

	// Return request.
	return req, nil
}

// set User agent.
func (c Client) setUserAgent(req *http.Request) {
	req.Header.Set("User-Agent", libraryUserAgent)
	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
		req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
	}
}

// makeTargetURL makes a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
	host := c.endpointURL.Host
	// For Amazon S3 endpoint, try to fetch location based endpoint.
	if s3utils.IsAmazonEndpoint(c.endpointURL) {
		if c.s3AccelerateEndpoint != "" && bucketName != "" {
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			// Disable transfer acceleration for non-compliant bucket names.
			if strings.Contains(bucketName, ".") {
				return nil, ErrTransferAccelerationBucket(bucketName)
			}
			// If transfer acceleration is requested set new host.
			// For more details about enabling transfer acceleration read here.
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			host = c.s3AccelerateEndpoint
		} else {
			// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
			if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
				// Fetch new host based on the bucket location.
				host = getS3Endpoint(bucketLocation)
			}
		}
	}

	// Save scheme.
	scheme := c.endpointURL.Scheme

	// Strip port 80 and 443 so we won't send these ports in Host header.
	// The reason is that browsers and curl automatically remove :80 and :443
	// from generated presigned urls, which would then cause a signature
	// mismatch error.
	if h, p, err := net.SplitHostPort(host); err == nil {
		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
			host = h
		}
	}

	urlStr := scheme + "://" + host + "/"
	// Make URL only if bucketName is available, otherwise use the
	// endpoint URL.
	if bucketName != "" {
		// Save if target url will have buckets which support virtual host.
		isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)

		// If endpoint supports virtual host style use that always.
		// Currently only S3 and Google Cloud Storage would support
		// virtual host style.
		if isVirtualHostStyle {
			urlStr = scheme + "://" + bucketName + "." + host + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		} else {
			// If not, fall back to using path style.
			urlStr = urlStr + bucketName + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		}
	}

	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
	}

	u, err := url.Parse(urlStr)
	if err != nil {
		return nil, err
	}

	return u, nil
}
1470
vendor/github.com/minio/minio-go/api_functional_v2_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
2410
vendor/github.com/minio/minio-go/api_functional_v4_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
337
vendor/github.com/minio/minio-go/api_unit_test.go
generated
vendored
Normal file
@@ -0,0 +1,337 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"
	"testing"

	"github.com/minio/minio-go/pkg/credentials"
	"github.com/minio/minio-go/pkg/policy"
)

type customReader struct{}

func (c *customReader) Read(p []byte) (n int, err error) {
	return 0, nil
}

func (c *customReader) Size() (n int64) {
	return 10
}

// Tests getReaderSize() for various Reader types.
func TestGetReaderSize(t *testing.T) {
	var reader io.Reader
	size, err := getReaderSize(reader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Reader shouldn't have any length.")
	}

	bytesReader := bytes.NewReader([]byte("Hello World"))
	size, err = getReaderSize(bytesReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(len("Hello World")) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
	}

	size, err = getReaderSize(new(customReader))
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(10) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
	}

	stringsReader := strings.NewReader("Hello World")
	size, err = getReaderSize(stringsReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(len("Hello World")) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
	}

	// Create request channel.
	reqCh := make(chan getRequest, 1)
	// Create response channel.
	resCh := make(chan getResponse, 1)
	// Create done channel.
	doneCh := make(chan struct{})

	objectInfo := ObjectInfo{Size: 10}
	// Create the first request.
	firstReq := getRequest{
		isReadOp:   false, // Perform only a HEAD object to get objectInfo.
		isFirstReq: true,
	}
	// Create the expected response.
	firstRes := getResponse{
		objectInfo: objectInfo,
	}
	// Send the expected response.
	resCh <- firstRes

	// Test setting size on the first request.
	objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
	defer objectReaderFirstReq.Close()
	// Not checking the response here... just that the reader size is correct.
	_, err = objectReaderFirstReq.doGetRequest(firstReq)
	if err != nil {
		t.Fatal("Error:", err)
	}

	// Validate that the reader size is the objectInfo size.
	size, err = getReaderSize(objectReaderFirstReq)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(10) {
		t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
	}

	fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
	if err != nil {
		t.Fatal("Error:", err)
	}
	defer fileReader.Close()
	defer os.RemoveAll(fileReader.Name())

	size, err = getReaderSize(fileReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size == -1 {
		t.Fatal("Reader length for file cannot be -1.")
	}

	// Verify for standard input, output and error file descriptors.
	size, err = getReaderSize(os.Stdin)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stdin should have length of -1.")
	}
	size, err = getReaderSize(os.Stdout)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stdout should have length of -1.")
	}
	size, err = getReaderSize(os.Stderr)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stderr should have length of -1.")
	}
	file, err := os.Open(os.TempDir())
	if err != nil {
		t.Fatal("Error:", err)
	}
	defer file.Close()
	_, err = getReaderSize(file)
	if err == nil {
		t.Fatal("Input file as directory should throw an error.")
	}
}

// Tests valid hosts for location.
func TestValidBucketLocation(t *testing.T) {
	s3Hosts := []struct {
		bucketLocation string
		endpoint       string
	}{
		{"us-east-1", "s3.amazonaws.com"},
		{"unknown", "s3.amazonaws.com"},
		{"ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"},
	}
	for _, s3Host := range s3Hosts {
		endpoint := getS3Endpoint(s3Host.bucketLocation)
		if endpoint != s3Host.endpoint {
			t.Fatal("Error: invalid bucket location", endpoint)
		}
	}
}

// Tests error response structure.
func TestErrorResponse(t *testing.T) {
	var err error
	err = ErrorResponse{
		Code: "Testing",
	}
	errResp := ToErrorResponse(err)
	if errResp.Code != "Testing" {
		t.Fatal("Type conversion failed, we have an empty struct.")
	}

	// Test http response decoding.
	var httpResponse *http.Response
	// Set empty variables.
	httpResponse = nil
	var bucketName, objectName string

	// Should fail with invalid argument.
	err = httpRespToErrorResponse(httpResponse, bucketName, objectName)
	errResp = ToErrorResponse(err)
	if errResp.Code != "InvalidArgument" {
		t.Fatal("Empty response input should return invalid argument.")
	}
}

// Tests signature type.
func TestSignatureType(t *testing.T) {
	clnt := Client{}
	if !clnt.overrideSignerType.IsV4() {
		t.Fatal("Error")
	}
	clnt.overrideSignerType = credentials.SignatureV2
	if !clnt.overrideSignerType.IsV2() {
		t.Fatal("Error")
	}
	if clnt.overrideSignerType.IsV4() {
		t.Fatal("Error")
	}
	clnt.overrideSignerType = credentials.SignatureV4
	if !clnt.overrideSignerType.IsV4() {
		t.Fatal("Error")
	}
}

// Tests bucket policy types.
func TestBucketPolicyTypes(t *testing.T) {
	want := map[string]bool{
		"none":      true,
		"readonly":  true,
		"writeonly": true,
		"readwrite": true,
		"invalid":   false,
	}
	for bucketPolicy, ok := range want {
		if policy.BucketPolicy(bucketPolicy).IsValidBucketPolicy() != ok {
			t.Fatal("Error")
		}
	}
}

// Tests optimal part size.
func TestPartSize(t *testing.T) {
	_, _, _, err := optimalPartInfo(5000000000000000000)
	if err == nil {
		t.Fatal("Error: should fail")
	}
	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880)
	if err != nil {
		t.Fatal("Error: ", err)
	}
	if totalPartsCount != 9103 {
		t.Fatalf("Error: expecting total parts count of 9103: got %v instead", totalPartsCount)
	}
	if partSize != 603979776 {
		t.Fatalf("Error: expecting part size of 603979776: got %v instead", partSize)
	}
	if lastPartSize != 134217728 {
		t.Fatalf("Error: expecting last part size of 134217728: got %v instead", lastPartSize)
	}
	_, partSize, _, err = optimalPartInfo(5000000000)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if partSize != minPartSize {
		t.Fatalf("Error: expecting part size of %v: got %v instead", minPartSize, partSize)
	}
	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if totalPartsCount != 9103 {
		t.Fatalf("Error: expecting total parts count of 9103: got %v instead", totalPartsCount)
	}
	if partSize != 603979776 {
		t.Fatalf("Error: expecting part size of 603979776: got %v instead", partSize)
	}
	if lastPartSize != 134217728 {
		t.Fatalf("Error: expecting last part size of 134217728: got %v instead", lastPartSize)
	}
}

// TestMakeTargetURL - testing makeTargetURL()
func TestMakeTargetURL(t *testing.T) {
	testCases := []struct {
		addr           string
		secure         bool
		bucketName     string
		objectName     string
		bucketLocation string
		queryValues    map[string][]string
		expectedURL    url.URL
		expectedErr    error
	}{
		// Test 1
		{"localhost:9000", false, "", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/"}, nil},
		// Test 2
		{"localhost", true, "", "", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/"}, nil},
		// Test 3
		{"localhost:9000", true, "mybucket", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "https", Path: "/mybucket/"}, nil},
		// Test 4, testing against google storage API
		{"storage.googleapis.com", true, "mybucket", "", "", nil, url.URL{Host: "mybucket.storage.googleapis.com", Scheme: "https", Path: "/"}, nil},
		// Test 5, testing against AWS S3 API
		{"s3.amazonaws.com", true, "mybucket", "myobject", "", nil, url.URL{Host: "mybucket.s3.amazonaws.com", Scheme: "https", Path: "/myobject"}, nil},
		// Test 6
		{"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
		// Test 7, testing with query
		{"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": {"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
		// Test 8, testing with port 80
		{"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
		// Test 9, testing with port 443
		{"localhost:443", true, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/mybucket/myobject"}, nil},
	}

	for i, testCase := range testCases {
		// Initialize a Minio client.
		c, _ := New(testCase.addr, "foo", "bar", testCase.secure)
		u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, testCase.queryValues)
		// Check the returned error.
		if testCase.expectedErr == nil && err != nil {
			t.Fatalf("Test %d: Should succeed but failed with err = %v", i+1, err)
		}
		if testCase.expectedErr != nil && err == nil {
			t.Fatalf("Test %d: Should fail but succeeded", i+1)
		}
		if err == nil {
			// Check if the returned url is equal to what we expect.
			if u.String() != testCase.expectedURL.String() {
				t.Fatalf("Test %d: Mismatched target url: expected = `%v`, found = `%v`",
					i+1, testCase.expectedURL.String(), u.String())
			}
		}
	}
}
39
vendor/github.com/minio/minio-go/appveyor.yml
generated
vendored
Normal file
@@ -0,0 +1,39 @@
# version format
version: "{build}"

# Operating system (build VM template)
os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\minio\minio-go

# environment variables
environment:
  GOPATH: c:\gopath
  GO15VENDOREXPERIMENT: 1

# scripts that run after cloning repository
install:
  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
  - go version
  - go env
  - go get -u github.com/golang/lint/golint
  - go get -u github.com/go-ini/ini
  - go get -u github.com/minio/go-homedir
  - go get -u github.com/remyoudompheng/go-misc/deadcode
  - go get -u github.com/gordonklaus/ineffassign

# to run your custom scripts instead of automatic MSBuild
build_script:
  - go vet ./...
  - gofmt -s -l .
  - golint -set_exit_status github.com/minio/minio-go...
  - deadcode
  - ineffassign .
  - go test -short -v
  - go test -short -race -v

# to disable automatic tests
test: off

# to disable deployment
deploy: off
232
vendor/github.com/minio/minio-go/bucket-cache.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"encoding/hex"
	"net/http"
	"net/url"
	"path"
	"sync"

	"github.com/minio/minio-go/pkg/credentials"
	"github.com/minio/minio-go/pkg/s3signer"
	"github.com/minio/minio-go/pkg/s3utils"
)

// bucketLocationCache - Provides simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
	// mutex is used for handling the concurrent
	// read/write requests for cache.
	sync.RWMutex

	// items holds the cached bucket locations.
	items map[string]string
}

// newBucketLocationCache - Provides a new bucket location cache to be
// used internally with the client object.
func newBucketLocationCache() *bucketLocationCache {
	return &bucketLocationCache{
		items: make(map[string]string),
	}
}

// Get - Returns a value of a given key if it exists.
func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
	r.RLock()
	defer r.RUnlock()
	location, ok = r.items[bucketName]
	return
}

// Set - Will persist a value into cache.
func (r *bucketLocationCache) Set(bucketName string, location string) {
	r.Lock()
	defer r.Unlock()
	r.items[bucketName] = location
}

// Delete - Deletes a bucket name from cache.
func (r *bucketLocationCache) Delete(bucketName string) {
	r.Lock()
	defer r.Unlock()
	delete(r.items, bucketName)
}
||||
|
||||
// GetBucketLocation - get location for the bucket name from location cache, if not
|
||||
// fetch freshly by making a new request.
|
||||
func (c Client) GetBucketLocation(bucketName string) (string, error) {
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.getBucketLocation(bucketName)
|
||||
}
|
||||
|
||||
// getBucketLocation - Get location for the bucketName from location map cache, if not
|
||||
// fetch freshly by making a new request.
|
||||
func (c Client) getBucketLocation(bucketName string) (string, error) {
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Region set then no need to fetch bucket location.
|
||||
if c.region != "" {
|
||||
return c.region, nil
|
||||
}
|
||||
|
||||
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
|
||||
// For china specifically we need to set everything to
|
||||
// cn-north-1 for now, there is no easier way until AWS S3
|
||||
// provides a cleaner compatible API across "us-east-1" and
|
||||
// China region.
|
||||
return "cn-north-1", nil
|
||||
} else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
|
||||
// For us-gov specifically we need to set everything to
|
||||
// us-gov-west-1 for now, there is no easier way until AWS S3
|
||||
// provides a cleaner compatible API across "us-east-1" and
|
||||
// Gov cloud region.
|
||||
return "us-gov-west-1", nil
|
||||
}
|
||||
|
||||
if location, ok := c.bucketLocCache.Get(bucketName); ok {
|
||||
return location, nil
|
||||
}
|
||||
|
||||
// Initialize a new request.
|
||||
req, err := c.getBucketLocationRequest(bucketName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Initiate the request.
|
||||
resp, err := c.do(req)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
location, err := processBucketLocationResponse(resp, bucketName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
c.bucketLocCache.Set(bucketName, location)
|
||||
return location, nil
|
||||
}
|
||||
|
||||
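// NOTE (editorial sketch, not part of the vendored file): getBucketLocation
// above resolves a bucket's region through a strict precedence ladder. The
// hypothetical helper below restates that ladder as a pure function, purely
// to make the ordering explicit: explicit client region, then pinned AWS
// endpoints, then the in-memory cache, and only then a network round trip.
func exampleLocationPrecedence(region string, isChina, isGovCloud bool, cached string, hasCached bool) string {
	if region != "" {
		return region // a client constructed with a fixed region never queries S3
	}
	if isChina {
		return "cn-north-1" // AWS China endpoints are pinned, as in the code above
	}
	if isGovCloud {
		return "us-gov-west-1" // likewise for GovCloud endpoints
	}
	if hasCached {
		return cached // a previously fetched location is served from memory
	}
	return "" // empty: the caller must issue the signed GetBucketLocation request
}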
// processBucketLocationResponse - processes the getBucketLocation http
// response from the server.
func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			err = httpRespToErrorResponse(resp, bucketName, "")
			errResp := ToErrorResponse(err)
			// For an access denied error, it could be an anonymous
			// request. Move forward and let the top level callers
			// succeed if possible based on their policy.
			if errResp.Code == "AccessDenied" {
				return "us-east-1", nil
			}
			return "", err
		}
	}

	// Extract location.
	var locationConstraint string
	err = xmlDecoder(resp.Body, &locationConstraint)
	if err != nil {
		return "", err
	}

	location := locationConstraint
	// An empty location defaults to 'us-east-1'.
	if location == "" {
		location = "us-east-1"
	}

	// Location can be 'EU'; convert it to the meaningful 'eu-west-1'.
	if location == "EU" {
		location = "eu-west-1"
	}

	// Saving the location into the cache is left to the caller.
	return location, nil
}

// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
	// Set location query.
	urlValues := make(url.Values)
	urlValues.Set("location", "")

	// Set get bucket location always as path style.
	targetURL := c.endpointURL
	targetURL.Path = path.Join(bucketName, "") + "/"
	targetURL.RawQuery = urlValues.Encode()

	// Get a new HTTP request for the method.
	req, err := http.NewRequest("GET", targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Set UserAgent for the request.
	c.setUserAgent(req)

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// If a custom signer is set, override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If the signerType returned by the credentials helper is anonymous,
	// then do not sign regardless of the signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Set the sha256 sum for signature calculation only with signature version '4'.
	switch {
	case signerType.IsV4():
		var contentSha256 string
		if c.secure {
			contentSha256 = unsignedPayload
		} else {
			contentSha256 = hex.EncodeToString(sum256([]byte{}))
		}
		req.Header.Set("X-Amz-Content-Sha256", contentSha256)
		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
	case signerType.IsV2():
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
	}

	return req, nil
}
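The bucketLocationCache above is a textbook RWMutex-guarded map: shared read locks for lookups, an exclusive lock for mutations. For readers who want to try the pattern in isolation, here is a minimal, runnable distillation (hypothetical names, independent of the vendored package):

package main

import (
	"fmt"
	"sync"
)

// locationCache mirrors bucketLocationCache: an embedded RWMutex guarding a
// plain map, with RLock for reads and Lock for writes.
type locationCache struct {
	sync.RWMutex
	items map[string]string
}

func (c *locationCache) Get(key string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	v, ok := c.items[key]
	return v, ok
}

func (c *locationCache) Set(key, value string) {
	c.Lock()
	defer c.Unlock()
	c.items[key] = value
}

func (c *locationCache) Delete(key string) {
	c.Lock()
	defer c.Unlock()
	delete(c.items, key)
}

func main() {
	cache := &locationCache{items: make(map[string]string)}
	cache.Set("my-bucket", "us-east-1")
	if loc, ok := cache.Get("my-bucket"); ok {
		fmt.Println("cached:", loc) // cached: us-east-1
	}
	cache.Delete("my-bucket")
	if _, ok := cache.Get("my-bucket"); !ok {
		fmt.Println("evicted") // evicted
	}
}

Note that the zero value of such a struct is unusable until the map is allocated, which is presumably why the package funnels construction through newBucketLocationCache.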
354
vendor/github.com/minio/minio-go/bucket-cache_test.go
generated
vendored
Normal file
@ -0,0 +1,354 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"encoding/hex"
	"encoding/xml"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"reflect"
	"testing"

	"github.com/minio/minio-go/pkg/credentials"
	"github.com/minio/minio-go/pkg/s3signer"
)

// Test validates `newBucketLocationCache`.
func TestNewBucketLocationCache(t *testing.T) {
	expectedBucketLocationcache := &bucketLocationCache{
		items: make(map[string]string),
	}
	actualBucketLocationCache := newBucketLocationCache()

	if !reflect.DeepEqual(actualBucketLocationCache, expectedBucketLocationcache) {
		t.Errorf("Unexpected return value")
	}
}

// Tests validate bucketLocationCache operations.
func TestBucketLocationCacheOps(t *testing.T) {
	testBucketLocationCache := newBucketLocationCache()
	expectedBucketName := "minio-bucket"
	expectedLocation := "us-east-1"
	testBucketLocationCache.Set(expectedBucketName, expectedLocation)
	actualLocation, ok := testBucketLocationCache.Get(expectedBucketName)
	if !ok {
		t.Errorf("Bucket location cache not set")
	}
	if expectedLocation != actualLocation {
		t.Errorf("Bucket location cache not set to expected value")
	}
	testBucketLocationCache.Delete(expectedBucketName)
	_, ok = testBucketLocationCache.Get(expectedBucketName)
	if ok {
		t.Errorf("Bucket location cache not deleted as expected")
	}
}

// Tests validate http request generation for 'getBucketLocation'.
func TestGetBucketLocationRequest(t *testing.T) {
	// Generates the expected http request for getBucketLocation.
	// Used for asserting against the actual request generated.
	createExpectedRequest := func(c *Client, bucketName string, req *http.Request) (*http.Request, error) {
		// Set location query.
		urlValues := make(url.Values)
		urlValues.Set("location", "")

		// Set get bucket location always as path style.
		targetURL := c.endpointURL
		targetURL.Path = path.Join(bucketName, "") + "/"
		targetURL.RawQuery = urlValues.Encode()

		// Get a new HTTP request for the method.
		var err error
		req, err = http.NewRequest("GET", targetURL.String(), nil)
		if err != nil {
			return nil, err
		}

		// Set UserAgent for the request.
		c.setUserAgent(req)

		// Get credentials from the configured credentials provider.
		value, err := c.credsProvider.Get()
		if err != nil {
			return nil, err
		}

		var (
			signerType      = value.SignerType
			accessKeyID     = value.AccessKeyID
			secretAccessKey = value.SecretAccessKey
			sessionToken    = value.SessionToken
		)

		// If a custom signer is set, override the behavior.
		if c.overrideSignerType != credentials.SignatureDefault {
			signerType = c.overrideSignerType
		}

		// If the signerType returned by the credentials helper is anonymous,
		// then do not sign regardless of the signerType override.
		if value.SignerType == credentials.SignatureAnonymous {
			signerType = credentials.SignatureAnonymous
		}

		// Set the sha256 sum for signature calculation only
		// with signature version '4'.
		switch {
		case signerType.IsV4():
			var contentSha256 string
			if c.secure {
				contentSha256 = unsignedPayload
			} else {
				contentSha256 = hex.EncodeToString(sum256([]byte{}))
			}
			req.Header.Set("X-Amz-Content-Sha256", contentSha256)
			req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
		case signerType.IsV2():
			req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
		}

		return req, nil
	}

	// Info for 'Client' creation.
	// Will be used as arguments for 'NewClient'.
	type infoForClient struct {
		endPoint       string
		accessKey      string
		secretKey      string
		enableInsecure bool
	}
	// dataset for the 'NewClient' call.
	info := []infoForClient{
		// endpoint localhost.
		// both access-key and secret-key are empty.
		{"localhost:9000", "", "", false},
		// both access-key and secret-key exist.
		{"localhost:9000", "my-access-key", "my-secret-key", false},
		// one of access-key and secret-key is empty.
		{"localhost:9000", "", "my-secret-key", false},

		// endpoint amazon s3.
		{"s3.amazonaws.com", "", "", false},
		{"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
		{"s3.amazonaws.com", "my-access-key", "", false},

		// endpoint google cloud storage.
		{"storage.googleapis.com", "", "", false},
		{"storage.googleapis.com", "my-access-key", "my-secret-key", false},
		{"storage.googleapis.com", "", "my-secret-key", false},

		// endpoint custom domain running Minio server.
		{"play.minio.io", "", "", false},
		{"play.minio.io", "my-access-key", "my-secret-key", false},
		{"play.minio.io", "my-access-key", "", false},
	}
	testCases := []struct {
		bucketName string
		// data for new client creation.
		info infoForClient
		// error in the output.
		err error
		// flag indicating whether the test should pass.
		shouldPass bool
	}{
		// Client is constructed using the info struct.
		// case with empty location.
		{"my-bucket", info[0], nil, true},
		// case with location set to standard 'us-east-1'.
		{"my-bucket", info[0], nil, true},
		// case with location set to a value different from 'us-east-1'.
		{"my-bucket", info[0], nil, true},

		{"my-bucket", info[1], nil, true},
		{"my-bucket", info[1], nil, true},
		{"my-bucket", info[1], nil, true},

		{"my-bucket", info[2], nil, true},
		{"my-bucket", info[2], nil, true},
		{"my-bucket", info[2], nil, true},

		{"my-bucket", info[3], nil, true},
		{"my-bucket", info[3], nil, true},
		{"my-bucket", info[3], nil, true},

		{"my-bucket", info[4], nil, true},
		{"my-bucket", info[4], nil, true},
		{"my-bucket", info[4], nil, true},

		{"my-bucket", info[5], nil, true},
		{"my-bucket", info[5], nil, true},
		{"my-bucket", info[5], nil, true},

		{"my-bucket", info[6], nil, true},
		{"my-bucket", info[6], nil, true},
		{"my-bucket", info[6], nil, true},

		{"my-bucket", info[7], nil, true},
		{"my-bucket", info[7], nil, true},
		{"my-bucket", info[7], nil, true},

		{"my-bucket", info[8], nil, true},
		{"my-bucket", info[8], nil, true},
		{"my-bucket", info[8], nil, true},

		{"my-bucket", info[9], nil, true},
		{"my-bucket", info[9], nil, true},
		{"my-bucket", info[9], nil, true},

		{"my-bucket", info[10], nil, true},
		{"my-bucket", info[10], nil, true},
		{"my-bucket", info[10], nil, true},

		{"my-bucket", info[11], nil, true},
		{"my-bucket", info[11], nil, true},
		{"my-bucket", info[11], nil, true},
	}
	for i, testCase := range testCases {
		// A new client cannot be created with an empty endPoint value;
		// a client is validated and created only if endPoint is non-empty.
		client := &Client{}
		var err error
		if testCase.info.endPoint != "" {
			client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
			if err != nil {
				t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
			}
		}

		actualReq, err := client.getBucketLocationRequest(testCase.bucketName)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if err != nil && !testCase.shouldPass {
			if err.Error() != testCase.err.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
			}
		}

		// Test passes as expected, but the output values are verified for correctness here.
		if err == nil && testCase.shouldPass {
			expectedReq := &http.Request{}
			expectedReq, err = createExpectedRequest(client, testCase.bucketName, expectedReq)
			if err != nil {
				t.Fatalf("Test %d: Expected request creation failed", i+1)
			}
			if expectedReq.Method != actualReq.Method {
				t.Errorf("Test %d: The expected request method doesn't match the actual one", i+1)
			}
			if expectedReq.URL.String() != actualReq.URL.String() {
				t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
			}
			if expectedReq.ContentLength != actualReq.ContentLength {
				t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
			}

			if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
				t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match that of the actual request", i+1)
			}
			if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
				t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\", but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
			}
		}
	}
}
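// NOTE (editorial sketch, not part of the vendored file): the table-driven
// tests in this file repeat the same outcome check around their `shouldPass`
// flag. Factored out with hypothetical names, the idiom reads:
func exampleAssertOutcome(t *testing.T, i int, shouldPass bool, wantErr, gotErr error) {
	if gotErr != nil && shouldPass {
		t.Errorf("Test %d: expected to pass, but failed with: %v", i+1, gotErr)
	}
	if gotErr == nil && !shouldPass {
		t.Errorf("Test %d: expected to fail with %q, but passed instead", i+1, wantErr)
	}
	// Failed as expected; also verify it failed for the expected reason.
	if gotErr != nil && !shouldPass && gotErr.Error() != wantErr.Error() {
		t.Errorf("Test %d: expected error %q, got %q", i+1, wantErr, gotErr)
	}
}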
// generates an http response with the bucket location set in the body.
func generateLocationResponse(resp *http.Response, bodyContent []byte) (*http.Response, error) {
	resp.StatusCode = http.StatusOK
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyContent))
	return resp, nil
}

// Tests the processing of the getBucketLocation response from the server.
func TestProcessBucketLocationResponse(t *testing.T) {
	// LocationResponse - format for the location response.
	type LocationResponse struct {
		XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"`
		Location string   `xml:",chardata"`
	}

	APIErrors := []APIError{
		{
			Code:           "AccessDenied",
			Description:    "Access Denied",
			HTTPStatusCode: http.StatusUnauthorized,
		},
	}
	testCases := []struct {
		bucketName    string
		inputLocation string
		isAPIError    bool
		apiErr        APIError
		// expected results.
		expectedResult string
		err            error
		// flag indicating whether the test should pass.
		shouldPass bool
	}{
		{"my-bucket", "", true, APIErrors[0], "us-east-1", nil, true},
		{"my-bucket", "", false, APIError{}, "us-east-1", nil, true},
		{"my-bucket", "EU", false, APIError{}, "eu-west-1", nil, true},
		{"my-bucket", "eu-central-1", false, APIError{}, "eu-central-1", nil, true},
		{"my-bucket", "us-east-1", false, APIError{}, "us-east-1", nil, true},
	}

	for i, testCase := range testCases {
		inputResponse := &http.Response{}
		var err error
		if testCase.isAPIError {
			inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
		} else {
			inputResponse, err = generateLocationResponse(inputResponse, encodeResponse(LocationResponse{
				Location: testCase.inputLocation,
			}))
			if err != nil {
				t.Fatalf("Test %d: Creation of valid response failed", i+1)
			}
		}
		actualResult, err := processBucketLocationResponse(inputResponse, "my-bucket")
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if err != nil && !testCase.shouldPass {
			if err.Error() != testCase.err.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
			}
		}
		if err == nil && testCase.shouldPass {
			if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
				t.Errorf("Test %d: The expected bucket location doesn't match the actual one", i+1)
			}
		}
	}
}
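One detail these tests exercise deserves a callout: for V4-signed requests over plain HTTP, the client sets X-Amz-Content-Sha256 to the hex SHA-256 of the (empty) request body, while TLS connections use the unsignedPayload sentinel instead. A small, runnable sketch of the former (standard library only; sum256 here is a local stand-in for the package's unexported helper):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// sum256 stands in for the package's unexported helper: it returns the raw
// SHA-256 digest of the given bytes.
func sum256(data []byte) []byte {
	digest := sha256.Sum256(data)
	return digest[:]
}

func main() {
	// The value set on X-Amz-Content-Sha256 for a body-less GET when the
	// connection is not TLS: the well-known digest of the empty string.
	fmt.Println(hex.EncodeToString(sum256([]byte{})))
	// Output: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}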