diff --git a/go.mod b/go.mod index b8ae83584..d3e3fc5aa 100644 --- a/go.mod +++ b/go.mod @@ -67,7 +67,7 @@ require ( google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect gopkg.in/yaml.v2 v2.2.8 - storj.io/uplink v1.0.5 + storj.io/uplink v1.0.6 ) go 1.13 diff --git a/go.sum b/go.sum index 1b69a51c2..528a50a54 100644 --- a/go.sum +++ b/go.sum @@ -359,8 +359,8 @@ github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1/go.mod h1:7NL github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.6 h1:BKPrEaLokVAxlwHkD7jawViBa/IU9/bgXbZLWgjbdSM= -github.com/spacemonkeygo/monkit/v3 v3.0.6/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= +github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI= +github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -657,9 +657,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -storj.io/common v0.0.0-20200429074521-4ba140e4b747 h1:Ne1x0M80uNyN6tHIs15CGJqHbreKbvH5BOq4jdWsqMc= -storj.io/common v0.0.0-20200429074521-4ba140e4b747/go.mod h1:lfsaMdtHwrUOtSYkw73meCyZMUYiaFBKVqx6zgeSz2o= -storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw= -storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= -storj.io/uplink v1.0.5 h1:RH6LUZQPOJZ01JpM0YmghDu5xyex5gyn32NSCzsPSr4= -storj.io/uplink v1.0.5/go.mod h1:GkChEUgHFuUR2WNpqFw3NeqglXz6/zp6n5Rxt0IVfHw= +storj.io/common v0.0.0-20200519171747-3ff8acf78c46 h1:Yx73D928PKtyQYPXHuQ5WFES4t+0nufxbhwyf8VodMw= +storj.io/common v0.0.0-20200519171747-3ff8acf78c46/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ= +storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA= +storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= +storj.io/uplink v1.0.6 h1:UztG/bSJitJt2c5Ofq9SmQ+a/yrLiHcPVR9gLfzcWG0= +storj.io/uplink v1.0.6/go.mod h1:F+AeZR4l9F9A7K1K+GXJJarwaKOQK0RWmbyy/maMFEI= diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go b/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go index c0a36b02a..0bdcbaef0 100644 --- a/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go +++ b/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go @@ -272,6 +272,33 @@ func (f *Func) ResetTrace(ctx *context.Context, return exit } +// RestartTrace is like Func.Task, except it always creates a new Trace and inherits +// all tags from the existing trace.
+func (f *Func) RestartTrace(ctx *context.Context, args ...interface{}) func(*error) { + existingSpan := SpanFromCtx(*ctx) + if existingSpan == nil { + return f.ResetTrace(ctx, args) + } + existingTrace := existingSpan.Trace() + if existingTrace == nil { + return f.ResetTrace(ctx, args) + } + + ctx = cleanCtx(ctx) + if ctx == &taskSecret && taskArgs(f, args) { + return nil + } + trace := NewTrace(NewId()) + trace.copyFrom(existingTrace) + f.scope.r.observeTrace(trace) + s, exit := newSpan(*ctx, f, args, trace.Id(), trace) + + if ctx != &unparented { + *ctx = s + } + return exit +} + var unparented = context.Background() func cleanCtx(ctx *context.Context) *context.Context { diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/trace.go b/vendor/github.com/spacemonkeygo/monkit/v3/trace.go index d20232455..5be7f36fe 100644 --- a/vendor/github.com/spacemonkeygo/monkit/v3/trace.go +++ b/vendor/github.com/spacemonkeygo/monkit/v3/trace.go @@ -108,6 +108,17 @@ func removeObserverFrom(parent **spanObserverTuple, ref *spanObserverTuple) ( // Id returns the id of the Trace func (t *Trace) Id() int64 { return t.id } +// GetAll returns values associated with a trace. See SetAll. +func (t *Trace) GetAll() (val map[interface{}]interface{}) { + t.mtx.Lock() + defer t.mtx.Unlock() + new := make(map[interface{}]interface{}, len(t.vals)) + for k, v := range t.vals { + new[k] = v + } + return new +} + // Get returns a value associated with a key on a trace. See Set. func (t *Trace) Get(key interface{}) (val interface{}) { t.mtx.Lock() @@ -129,6 +140,14 @@ func (t *Trace) Set(key, val interface{}) { t.mtx.Unlock() } +// copyFrom replaces all key/values on a trace with a new set of key/values. +func (t *Trace) copyFrom(s *Trace) { + vals := s.GetAll() + t.mtx.Lock() + defer t.mtx.Unlock() + t.vals = vals +} + func (t *Trace) incrementSpans() { atomic.AddInt64(&t.spanCount, 1) } func (t *Trace) decrementSpans() { atomic.AddInt64(&t.spanCount, -1) } diff --git a/vendor/modules.txt b/vendor/modules.txt index a56f26b36..724685f55 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -202,7 +202,7 @@ github.com/sirupsen/logrus github.com/skratchdot/open-golang/open # github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 github.com/spacemonkeygo/errors -# github.com/spacemonkeygo/monkit/v3 v3.0.6 +# github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 github.com/spacemonkeygo/monkit/v3 github.com/spacemonkeygo/monkit/v3/monotime # github.com/spf13/cobra v1.0.0 @@ -393,12 +393,11 @@ google.golang.org/grpc/status google.golang.org/grpc/tap # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# storj.io/common v0.0.0-20200429074521-4ba140e4b747 +# storj.io/common v0.0.0-20200519171747-3ff8acf78c46 storj.io/common/encryption storj.io/common/errs2 storj.io/common/fpath storj.io/common/identity -storj.io/common/internal/grpchook storj.io/common/macaroon storj.io/common/memory storj.io/common/netutil @@ -419,8 +418,9 @@ storj.io/common/signing storj.io/common/storj storj.io/common/sync2 storj.io/common/uuid -# storj.io/drpc v0.0.11 +# storj.io/drpc v0.0.12 storj.io/drpc +storj.io/drpc/drpccache storj.io/drpc/drpcconn storj.io/drpc/drpcctx storj.io/drpc/drpcdebug @@ -432,7 +432,7 @@ storj.io/drpc/drpcmux storj.io/drpc/drpcsignal storj.io/drpc/drpcstream storj.io/drpc/drpcwire -# storj.io/uplink v1.0.5 +# storj.io/uplink v1.0.6 storj.io/uplink storj.io/uplink/internal/expose storj.io/uplink/internal/telemetryclient @@ -444,3 +444,4 @@ storj.io/uplink/private/piecestore
storj.io/uplink/private/storage/segments storj.io/uplink/private/storage/streams storj.io/uplink/private/stream +storj.io/uplink/private/testuplink diff --git a/vendor/storj.io/common/encryption/aesgcm.go b/vendor/storj.io/common/encryption/aesgcm.go index 877cc3ee9..74e3d029d 100644 --- a/vendor/storj.io/common/encryption/aesgcm.go +++ b/vendor/storj.io/common/encryption/aesgcm.go @@ -130,7 +130,7 @@ func (s *aesgcmDecrypter) Transform(out, in []byte, blockNum int64) ([]byte, err return plainData, nil } -// EncryptAESGCM encrypts byte data with a key and nonce. The cipher data is returned +// EncryptAESGCM encrypts byte data with a key and nonce. It returns the cipher data. func EncryptAESGCM(data []byte, key *storj.Key, nonce *AESGCMNonce) (cipherData []byte, err error) { block, err := aes.NewCipher(key[:]) if err != nil { @@ -144,7 +144,7 @@ func EncryptAESGCM(data []byte, key *storj.Key, nonce *AESGCMNonce) (cipherData return cipherData, nil } -// DecryptAESGCM decrypts byte data with a key and nonce. The plain data is returned +// DecryptAESGCM decrypts byte data with a key and nonce. It returns the plain data. func DecryptAESGCM(cipherData []byte, key *storj.Key, nonce *AESGCMNonce) (data []byte, err error) { if len(cipherData) == 0 { return []byte{}, Error.New("empty cipher data") diff --git a/vendor/storj.io/common/encryption/common.go b/vendor/storj.io/common/encryption/common.go index 08b344635..3aa90bb7b 100644 --- a/vendor/storj.io/common/encryption/common.go +++ b/vendor/storj.io/common/encryption/common.go @@ -8,11 +8,11 @@ import ( "github.com/zeebo/errs" ) -// Error is the default encryption errs class +// Error is the default encryption errs class. var Error = errs.Class("encryption error") -// ErrDecryptFailed is the errs class when the decryption fails +// ErrDecryptFailed is the errs class when the decryption fails. var ErrDecryptFailed = errs.Class("decryption failed, check encryption key") -// ErrInvalidConfig is the errs class for invalid configuration +// ErrInvalidConfig is the errs class for invalid configuration. var ErrInvalidConfig = errs.Class("invalid encryption configuration") diff --git a/vendor/storj.io/common/encryption/encryption.go b/vendor/storj.io/common/encryption/encryption.go index 278fe70d1..4fdf8e344 100644 --- a/vendor/storj.io/common/encryption/encryption.go +++ b/vendor/storj.io/common/encryption/encryption.go @@ -11,28 +11,28 @@ import ( ) const ( - // AESGCMNonceSize is the size of an AES-GCM nonce + // AESGCMNonceSize is the size of an AES-GCM nonce. AESGCMNonceSize = 12 - // unit32Size is the number of bytes in the uint32 type + // unit32Size is the number of bytes in the uint32 type. uint32Size = 4 ) -// AESGCMNonce represents the nonce used by the AES-GCM protocol +// AESGCMNonce represents the nonce used by the AES-GCM protocol. type AESGCMNonce [AESGCMNonceSize]byte -// ToAESGCMNonce returns the nonce as a AES-GCM nonce +// ToAESGCMNonce returns the nonce as a AES-GCM nonce. func ToAESGCMNonce(nonce *storj.Nonce) *AESGCMNonce { aes := new(AESGCMNonce) copy((*aes)[:], nonce[:AESGCMNonceSize]) return aes } -// Increment increments the nonce with the given amount +// Increment increments the nonce with the given amount. func Increment(nonce *storj.Nonce, amount int64) (truncated bool, err error) { return incrementBytes(nonce[:], amount) } -// Encrypt encrypts data with the given cipher, key and nonce +// Encrypt encrypts data with the given cipher, key and nonce. 
func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) { // Don't encrypt empty slice if len(data) == 0 { @@ -53,7 +53,7 @@ func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj } } -// Decrypt decrypts cipherData with the given cipher, key and nonce +// Decrypt decrypts cipherData with the given cipher, key and nonce. func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) { // Don't decrypt empty slice if len(cipherData) == 0 { @@ -74,7 +74,7 @@ func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce } } -// NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it +// NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it. func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { switch cipher { case storj.EncNull: @@ -90,7 +90,7 @@ func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj } } -// NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it +// NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it. func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { switch cipher { case storj.EncNull: @@ -106,12 +106,12 @@ func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj } } -// EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce +// EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce. func EncryptKey(keyToEncrypt *storj.Key, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (storj.EncryptedPrivateKey, error) { return Encrypt(keyToEncrypt[:], cipher, key, nonce) } -// DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce +// DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce. func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (*storj.Key, error) { plainData, err := Decrypt(keyToDecrypt, cipher, key, nonce) if err != nil { @@ -124,7 +124,7 @@ func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite return &decryptedKey, nil } -// DeriveKey derives new key from the given key and message using HMAC-SHA512 +// DeriveKey derives new key from the given key and message using HMAC-SHA512. func DeriveKey(key *storj.Key, message string) (*storj.Key, error) { mac := hmac.New(sha512.New, key[:]) _, err := mac.Write([]byte(message)) diff --git a/vendor/storj.io/common/encryption/path.go b/vendor/storj.io/common/encryption/path.go index c923a8338..8a176b352 100644 --- a/vendor/storj.io/common/encryption/path.go +++ b/vendor/storj.io/common/encryption/path.go @@ -397,7 +397,7 @@ func decryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key) // `\xff` escapes to `\xfe\x02` // `\x00` escapes to `\x01\x01` // `\x01` escapes to `\x01\x02 -// for more details see docs/design/path-component-encoding.md +// for more details see docs/design/path-component-encoding.md. 
func encodeSegment(segment []byte) []byte { if len(segment) == 0 { return emptyComponent diff --git a/vendor/storj.io/common/encryption/secretbox.go b/vendor/storj.io/common/encryption/secretbox.go index de48da293..9cb804d3c 100644 --- a/vendor/storj.io/common/encryption/secretbox.go +++ b/vendor/storj.io/common/encryption/secretbox.go @@ -104,12 +104,12 @@ func (s *secretboxDecrypter) Transform(out, in []byte, blockNum int64) ([]byte, return rv, nil } -// EncryptSecretBox encrypts byte data with a key and nonce. The cipher data is returned +// EncryptSecretBox encrypts byte data with a key and nonce. The cipher data is returned. func EncryptSecretBox(data []byte, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) { return secretbox.Seal(nil, data, nonce.Raw(), key.Raw()), nil } -// DecryptSecretBox decrypts byte data with a key and nonce. The plain data is returned +// DecryptSecretBox decrypts byte data with a key and nonce. The plain data is returned. func DecryptSecretBox(cipherData []byte, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) { data, success := secretbox.Open(nil, cipherData, nonce.Raw(), key.Raw()) if !success { diff --git a/vendor/storj.io/common/encryption/transform.go b/vendor/storj.io/common/encryption/transform.go index 337f7f864..2b6638bf8 100644 --- a/vendor/storj.io/common/encryption/transform.go +++ b/vendor/storj.io/common/encryption/transform.go @@ -35,20 +35,20 @@ type transformedReader struct { bytesRead int } -// NoopTransformer is a dummy Transformer that passes data through without modifying it +// NoopTransformer is a dummy Transformer that passes data through without modifying it. type NoopTransformer struct{} -// InBlockSize is 1 +// InBlockSize is 1. func (t *NoopTransformer) InBlockSize() int { return 1 } -// OutBlockSize is 1 +// OutBlockSize is 1. func (t *NoopTransformer) OutBlockSize() int { return 1 } -// Transform returns the input without modification +// Transform returns the input without modification. func (t *NoopTransformer) Transform(out, in []byte, blockNum int64) ([]byte, error) { return append(out, in...), nil } @@ -137,7 +137,7 @@ func (t *transformedRanger) Size() int64 { // CalcEncompassingBlocks is a useful helper function that, given an offset, // length, and blockSize, will tell you which blocks contain the requested -// offset and length +// offset and length. func CalcEncompassingBlocks(offset, length int64, blockSize int) ( firstBlock, blockCount int64) { firstBlock = offset / int64(blockSize) diff --git a/vendor/storj.io/common/errs2/collect.go b/vendor/storj.io/common/errs2/collect.go index c9d6669b0..68bb93ee2 100644 --- a/vendor/storj.io/common/errs2/collect.go +++ b/vendor/storj.io/common/errs2/collect.go @@ -9,7 +9,7 @@ import ( "github.com/zeebo/errs" ) -// Collect returns first error from channel and all errors that happen within duration +// Collect returns first error from channel and all errors that happen within duration. func Collect(errch chan error, duration time.Duration) error { errch = discardNil(errch) errlist := []error{<-errch} @@ -24,7 +24,7 @@ func Collect(errch chan error, duration time.Duration) error { } } -// discard nil errors that are returned from services +// discard nil errors that are returned from services. 
func discardNil(ch chan error) chan error { r := make(chan error) go func() { diff --git a/vendor/storj.io/common/errs2/ignore.go b/vendor/storj.io/common/errs2/ignore.go index 7f3c38e7d..55437af0f 100644 --- a/vendor/storj.io/common/errs2/ignore.go +++ b/vendor/storj.io/common/errs2/ignore.go @@ -8,7 +8,6 @@ import ( "github.com/zeebo/errs" - "storj.io/common/internal/grpchook" "storj.io/common/rpc/rpcstatus" ) @@ -16,7 +15,6 @@ import ( func IsCanceled(err error) bool { return errs.IsFunc(err, func(err error) bool { return err == context.Canceled || - grpchook.IsErrServerStopped(err) || rpcstatus.Code(err) == rpcstatus.Canceled }) } diff --git a/vendor/storj.io/common/fpath/editor.go b/vendor/storj.io/common/fpath/editor.go index 2788a2819..2e8787065 100644 --- a/vendor/storj.io/common/fpath/editor.go +++ b/vendor/storj.io/common/fpath/editor.go @@ -10,7 +10,7 @@ import ( "strings" ) -//EditFile opens the best OS-specific text editor we can find +//EditFile opens the best OS-specific text editor we can find. func EditFile(fileToEdit string) error { editorPath := getEditorPath() if editorPath == "" { diff --git a/vendor/storj.io/common/fpath/os.go b/vendor/storj.io/common/fpath/os.go index 4898de134..f97bea3ec 100644 --- a/vendor/storj.io/common/fpath/os.go +++ b/vendor/storj.io/common/fpath/os.go @@ -15,7 +15,7 @@ import ( "github.com/zeebo/errs" ) -// IsRoot returns whether path is the root directory +// IsRoot returns whether path is the root directory. func IsRoot(path string) bool { abs, err := filepath.Abs(path) if err == nil { @@ -25,7 +25,7 @@ func IsRoot(path string) bool { return filepath.Dir(path) == path } -// ApplicationDir returns best base directory for specific OS +// ApplicationDir returns best base directory for specific OS. func ApplicationDir(subdir ...string) string { for i := range subdir { if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { @@ -62,7 +62,7 @@ func ApplicationDir(subdir ...string) string { return filepath.Join(append([]string{appdir}, subdir...)...) } -// IsValidSetupDir checks if directory is valid for setup configuration +// IsValidSetupDir checks if directory is valid for setup configuration. func IsValidSetupDir(name string) (ok bool, err error) { _, err = os.Stat(name) if err != nil { @@ -100,7 +100,7 @@ func IsValidSetupDir(name string) (ok bool, err error) { } } -// IsWritable determines if a directory is writeable +// IsWritable determines if a directory is writeable. func IsWritable(filepath string) (bool, error) { info, err := os.Stat(filepath) if err != nil { diff --git a/vendor/storj.io/common/fpath/path.go b/vendor/storj.io/common/fpath/path.go index b197c185e..505127641 100644 --- a/vendor/storj.io/common/fpath/path.go +++ b/vendor/storj.io/common/fpath/path.go @@ -50,7 +50,7 @@ func parseBucket(o string) (bucket, rest string) { return "", o } -// New creates new FPath from the given URL +// New creates new FPath from the given URL. func New(p string) (FPath, error) { fp := FPath{original: p} @@ -101,7 +101,7 @@ func New(p string) (FPath, error) { return fp, nil } -// Join is appends the given segment to the path +// Join is appends the given segment to the path. func (p FPath) Join(segment string) FPath { if p.local { p.original = filepath.Join(p.original, segment) @@ -113,7 +113,7 @@ func (p FPath) Join(segment string) FPath { return p } -// Base returns the last segment of the path +// Base returns the last segment of the path. 
func (p FPath) Base() string { if p.local { return filepath.Base(p.original) @@ -124,12 +124,12 @@ func (p FPath) Base() string { return path.Base(p.path) } -// Bucket returns the first segment of path +// Bucket returns the first segment of path. func (p FPath) Bucket() string { return p.bucket } -// Path returns the URL path without the scheme +// Path returns the URL path without the scheme. func (p FPath) Path() string { if p.local { return p.original @@ -137,12 +137,12 @@ func (p FPath) Path() string { return p.path } -// IsLocal returns whether the path refers to local or remote location +// IsLocal returns whether the path refers to local or remote location. func (p FPath) IsLocal() bool { return p.local } -// String returns the entire URL (untouched) +// String returns the entire URL (untouched). func (p FPath) String() string { return p.original } diff --git a/vendor/storj.io/common/fpath/temp_data.go b/vendor/storj.io/common/fpath/temp_data.go index d796eaa18..5a6ad5614 100644 --- a/vendor/storj.io/common/fpath/temp_data.go +++ b/vendor/storj.io/common/fpath/temp_data.go @@ -11,7 +11,7 @@ import "context" // other packages. type key int -// temp is the context key for temp struct +// temp is the context key for temp struct. const tempKey key = 0 type temp struct { @@ -19,7 +19,7 @@ type temp struct { directory string } -// WithTempData creates context with information how store temporary data, in memory or on disk +// WithTempData creates context with information how store temporary data, in memory or on disk. func WithTempData(ctx context.Context, directory string, inmemory bool) context.Context { temp := temp{ inmemory: inmemory, @@ -28,7 +28,7 @@ func WithTempData(ctx context.Context, directory string, inmemory bool) context. return context.WithValue(ctx, tempKey, temp) } -// GetTempData returns if temporary data should be stored in memory or on disk +// GetTempData returns if temporary data should be stored in memory or on disk. func GetTempData(ctx context.Context) (string, bool, bool) { tempValue, ok := ctx.Value(tempKey).(temp) if !ok { diff --git a/vendor/storj.io/common/identity/certificate_authority.go b/vendor/storj.io/common/identity/certificate_authority.go index cda099864..9dbee4fa3 100644 --- a/vendor/storj.io/common/identity/certificate_authority.go +++ b/vendor/storj.io/common/identity/certificate_authority.go @@ -26,7 +26,7 @@ import ( const minimumLoggableDifficulty = 8 -// PeerCertificateAuthority represents the CA which is used to validate peer identities +// PeerCertificateAuthority represents the CA which is used to validate peer identities. type PeerCertificateAuthority struct { RestChain []*x509.Certificate // Cert is the x509 certificate of the CA @@ -35,7 +35,7 @@ type PeerCertificateAuthority struct { ID storj.NodeID } -// FullCertificateAuthority represents the CA which is used to author and validate full identities +// FullCertificateAuthority represents the CA which is used to author and validate full identities. type FullCertificateAuthority struct { RestChain []*x509.Certificate // Cert is the x509 certificate of the CA @@ -46,7 +46,7 @@ type FullCertificateAuthority struct { Key crypto.PrivateKey } -// CASetupConfig is for creating a CA +// CASetupConfig is for creating a CA. 
type CASetupConfig struct { VersionNumber uint `default:"0" help:"which identity version to use (0 is latest)"` ParentCertPath string `help:"path to the parent authority's certificate chain"` @@ -59,7 +59,7 @@ type CASetupConfig struct { Concurrency uint `help:"number of concurrent workers for certificate authority generation" default:"4"` } -// NewCAOptions is used to pass parameters to `NewCA` +// NewCAOptions is used to pass parameters to `NewCA`. type NewCAOptions struct { // VersionNumber is the IDVersion to use for the identity VersionNumber storj.IDVersionNumber @@ -75,18 +75,18 @@ type NewCAOptions struct { Logger io.Writer } -// PeerCAConfig is for locating a CA certificate without a private key +// PeerCAConfig is for locating a CA certificate without a private key. type PeerCAConfig struct { CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"` } -// FullCAConfig is for locating a CA certificate and it's private key +// FullCAConfig is for locating a CA certificate and it's private key. type FullCAConfig struct { CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"` KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"` } -// NewCA creates a new full identity with the given difficulty +// NewCA creates a new full identity with the given difficulty. func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority, err error) { defer mon.Task()(&ctx)(&err) var ( @@ -201,12 +201,12 @@ func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority, return ca, nil } -// Status returns the status of the CA cert/key files for the config +// Status returns the status of the CA cert/key files for the config. func (caS CASetupConfig) Status() (TLSFilesStatus, error) { return statTLSFiles(caS.CertPath, caS.KeyPath) } -// Create generates and saves a CA using the config +// Create generates and saves a CA using the config. func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCertificateAuthority, error) { var ( err error @@ -249,7 +249,7 @@ func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCer return ca, caC.Save(ca) } -// FullConfig converts a `CASetupConfig` to `FullCAConfig` +// FullConfig converts a `CASetupConfig` to `FullCAConfig`. func (caS CASetupConfig) FullConfig() FullCAConfig { return FullCAConfig{ CertPath: caS.CertPath, @@ -257,7 +257,7 @@ func (caS CASetupConfig) FullConfig() FullCAConfig { } } -// Load loads a CA from the given configuration +// Load loads a CA from the given configuration. func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) { p, err := fc.PeerConfig().Load() if err != nil { @@ -281,14 +281,14 @@ func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) { }, nil } -// PeerConfig converts a full ca config to a peer ca config +// PeerConfig converts a full ca config to a peer ca config. func (fc FullCAConfig) PeerConfig() PeerCAConfig { return PeerCAConfig{ CertPath: fc.CertPath, } } -// Save saves a CA with the given configuration +// Save saves a CA with the given configuration. 
func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error { var ( keyData bytes.Buffer @@ -313,7 +313,7 @@ func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error { return writeErrs.Err() } -// SaveBackup saves the certificate of the config wth a timestamped filename +// SaveBackup saves the certificate of the config wth a timestamped filename. func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error { return FullCAConfig{ CertPath: backupPath(fc.CertPath), @@ -321,7 +321,7 @@ func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error { }.Save(ca) } -// Load loads a CA from the given configuration +// Load loads a CA from the given configuration. func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) { chainPEM, err := ioutil.ReadFile(pc.CertPath) if err != nil { @@ -350,7 +350,7 @@ func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) { }, nil } -// Save saves a peer CA (cert, no key) with the given configuration +// Save saves a peer CA (cert, no key) with the given configuration. func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error { var ( certData bytes.Buffer @@ -373,7 +373,7 @@ func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error { return nil } -// SaveBackup saves the certificate of the config wth a timestamped filename +// SaveBackup saves the certificate of the config wth a timestamped filename. func (pc PeerCAConfig) SaveBackup(ca *PeerCertificateAuthority) error { return PeerCAConfig{ CertPath: backupPath(pc.CertPath), @@ -422,12 +422,12 @@ func (ca *FullCertificateAuthority) NewIdentity(exts ...pkix.Extension) (*FullId } -// Chain returns the CA's certificate chain +// Chain returns the CA's certificate chain. func (ca *FullCertificateAuthority) Chain() []*x509.Certificate { return append([]*x509.Certificate{ca.Cert}, ca.RestChain...) } -// RawChain returns the CA's certificate chain as a 2d byte slice +// RawChain returns the CA's certificate chain as a 2d byte slice. func (ca *FullCertificateAuthority) RawChain() [][]byte { chain := ca.Chain() rawChain := make([][]byte, len(chain)) @@ -437,7 +437,7 @@ func (ca *FullCertificateAuthority) RawChain() [][]byte { return rawChain } -// RawRestChain returns the "rest" (excluding `ca.Cert`) of the certificate chain as a 2d byte slice +// RawRestChain returns the "rest" (excluding `ca.Cert`) of the certificate chain as a 2d byte slice. func (ca *FullCertificateAuthority) RawRestChain() [][]byte { var chain [][]byte for _, cert := range ca.RestChain { @@ -446,7 +446,7 @@ func (ca *FullCertificateAuthority) RawRestChain() [][]byte { return chain } -// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority +// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority. func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority { return &PeerCertificateAuthority{ Cert: ca.Cert, @@ -455,7 +455,7 @@ func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority { } } -// Sign signs the passed certificate with ca certificate +// Sign signs the passed certificate with ca certificate. 
func (ca *FullCertificateAuthority) Sign(cert *x509.Certificate) (*x509.Certificate, error) { signedCert, err := peertls.CreateCertificate(cert.PublicKey, ca.Key, cert, ca.Cert) if err != nil { diff --git a/vendor/storj.io/common/identity/generate.go b/vendor/storj.io/common/identity/generate.go index 51e30627e..70f5fbf7d 100644 --- a/vendor/storj.io/common/identity/generate.go +++ b/vendor/storj.io/common/identity/generate.go @@ -50,7 +50,7 @@ func GenerateKey(ctx context.Context, minDifficulty uint16, version storj.IDVers } // GenerateCallback indicates that key generation is done when done is true. -// if err != nil key generation will stop with that error +// if err != nil key generation will stop with that error. type GenerateCallback func(crypto.PrivateKey, storj.NodeID) (done bool, err error) // GenerateKeys continues to generate keys until found returns done == false, diff --git a/vendor/storj.io/common/identity/identity.go b/vendor/storj.io/common/identity/identity.go index 9aa08101a..dd65fdb36 100644 --- a/vendor/storj.io/common/identity/identity.go +++ b/vendor/storj.io/common/identity/identity.go @@ -295,12 +295,12 @@ func NewManageableFullIdentity(ident *FullIdentity, ca *FullCertificateAuthority } } -// Status returns the status of the identity cert/key files for the config +// Status returns the status of the identity cert/key files for the config. func (is SetupConfig) Status() (TLSFilesStatus, error) { return statTLSFiles(is.CertPath, is.KeyPath) } -// Create generates and saves a CA using the config +// Create generates and saves a CA using the config. func (is SetupConfig) Create(ca *FullCertificateAuthority) (*FullIdentity, error) { fi, err := ca.NewIdentity() if err != nil { @@ -314,7 +314,7 @@ func (is SetupConfig) Create(ca *FullCertificateAuthority) (*FullIdentity, error return fi, ic.Save(fi) } -// FullConfig converts a `SetupConfig` to `Config` +// FullConfig converts a `SetupConfig` to `Config`. func (is SetupConfig) FullConfig() Config { return Config{ CertPath: is.CertPath, @@ -322,7 +322,7 @@ func (is SetupConfig) FullConfig() Config { } } -// Load loads a FullIdentity from the config +// Load loads a FullIdentity from the config. func (ic Config) Load() (*FullIdentity, error) { c, err := ioutil.ReadFile(ic.CertPath) if err != nil { @@ -340,7 +340,7 @@ func (ic Config) Load() (*FullIdentity, error) { return fi, nil } -// Save saves a FullIdentity according to the config +// Save saves a FullIdentity according to the config. func (ic Config) Save(fi *FullIdentity) error { var ( certData, keyData bytes.Buffer @@ -371,7 +371,7 @@ func (ic Config) Save(fi *FullIdentity) error { ) } -// SaveBackup saves the certificate of the config with a timestamped filename +// SaveBackup saves the certificate of the config with a timestamped filename. func (ic Config) SaveBackup(fi *FullIdentity) error { return Config{ CertPath: backupPath(ic.CertPath), @@ -379,14 +379,14 @@ func (ic Config) SaveBackup(fi *FullIdentity) error { }.Save(fi) } -// PeerConfig converts a Config to a PeerConfig +// PeerConfig converts a Config to a PeerConfig. func (ic Config) PeerConfig() *PeerConfig { return &PeerConfig{ CertPath: ic.CertPath, } } -// Load loads a PeerIdentity from the config +// Load loads a PeerIdentity from the config. 
func (ic PeerConfig) Load() (*PeerIdentity, error) { c, err := ioutil.ReadFile(ic.CertPath) if err != nil { @@ -399,7 +399,7 @@ func (ic PeerConfig) Load() (*PeerIdentity, error) { return pi, nil } -// Save saves a PeerIdentity according to the config +// Save saves a PeerIdentity according to the config. func (ic PeerConfig) Save(peerIdent *PeerIdentity) error { chain := []*x509.Certificate{peerIdent.Leaf, peerIdent.CA} chain = append(chain, peerIdent.RestChain...) @@ -417,19 +417,19 @@ func (ic PeerConfig) Save(peerIdent *PeerIdentity) error { return nil } -// SaveBackup saves the certificate of the config with a timestamped filename +// SaveBackup saves the certificate of the config with a timestamped filename. func (ic PeerConfig) SaveBackup(pi *PeerIdentity) error { return PeerConfig{ CertPath: backupPath(ic.CertPath), }.Save(pi) } -// Chain returns the Identity's certificate chain +// Chain returns the Identity's certificate chain. func (fi *FullIdentity) Chain() []*x509.Certificate { return append([]*x509.Certificate{fi.Leaf, fi.CA}, fi.RestChain...) } -// RawChain returns all of the certificate chain as a 2d byte slice +// RawChain returns all of the certificate chain as a 2d byte slice. func (fi *FullIdentity) RawChain() [][]byte { chain := fi.Chain() rawChain := make([][]byte, len(chain)) @@ -439,7 +439,7 @@ func (fi *FullIdentity) RawChain() [][]byte { return rawChain } -// RawRestChain returns the rest (excluding leaf and CA) of the certificate chain as a 2d byte slice +// RawRestChain returns the rest (excluding leaf and CA) of the certificate chain as a 2d byte slice. func (fi *FullIdentity) RawRestChain() [][]byte { rawChain := make([][]byte, len(fi.RestChain)) for _, cert := range fi.RestChain { @@ -448,7 +448,7 @@ func (fi *FullIdentity) RawRestChain() [][]byte { return rawChain } -// PeerIdentity converts a FullIdentity into a PeerIdentity +// PeerIdentity converts a FullIdentity into a PeerIdentity. func (fi *FullIdentity) PeerIdentity() *PeerIdentity { return &PeerIdentity{ CA: fi.CA, @@ -508,7 +508,7 @@ func backupPath(path string) string { ) } -// EncodePeerIdentity encodes the complete identity chain to bytes +// EncodePeerIdentity encodes the complete identity chain to bytes. func EncodePeerIdentity(pi *PeerIdentity) []byte { var chain []byte chain = append(chain, pi.Leaf.Raw...) @@ -519,7 +519,7 @@ func EncodePeerIdentity(pi *PeerIdentity) []byte { return chain } -// DecodePeerIdentity Decodes the bytes into complete identity chain +// DecodePeerIdentity Decodes the bytes into complete identity chain. func DecodePeerIdentity(ctx context.Context, chain []byte) (_ *PeerIdentity, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/common/identity/utils.go b/vendor/storj.io/common/identity/utils.go index f7a25fb0a..f80901315 100644 --- a/vendor/storj.io/common/identity/utils.go +++ b/vendor/storj.io/common/identity/utils.go @@ -13,10 +13,10 @@ import ( "storj.io/common/fpath" ) -// TLSFilesStatus is the status of keys +// TLSFilesStatus is the status of keys. type TLSFilesStatus int -// Four possible outcomes for four files +// Four possible outcomes for four files. const ( NoCertNoKey = TLSFilesStatus(iota) CertNoKey @@ -25,11 +25,11 @@ const ( ) var ( - // ErrZeroBytes is returned for zero slice + // ErrZeroBytes is returned for zero slice. 
ErrZeroBytes = errs.New("byte slice was unexpectedly empty") ) -// writeChainData writes data to path ensuring permissions are appropriate for a cert +// writeChainData writes data to path ensuring permissions are appropriate for a cert. func writeChainData(path string, data []byte) error { err := writeFile(path, 0744, 0644, data) if err != nil { @@ -38,7 +38,7 @@ func writeChainData(path string, data []byte) error { return nil } -// writeKeyData writes data to path ensuring permissions are appropriate for a cert +// writeKeyData writes data to path ensuring permissions are appropriate for a cert. func writeKeyData(path string, data []byte) error { err := writeFile(path, 0700, 0600, data) if err != nil { @@ -47,7 +47,7 @@ func writeKeyData(path string, data []byte) error { return nil } -// writeFile writes to path, creating directories and files with the necessary permissions +// writeFile writes to path, creating directories and files with the necessary permissions. func writeFile(path string, dirmode, filemode os.FileMode, data []byte) error { if err := os.MkdirAll(filepath.Dir(path), dirmode); err != nil { return errs.Wrap(err) diff --git a/vendor/storj.io/common/internal/grpchook/hook.go b/vendor/storj.io/common/internal/grpchook/hook.go deleted file mode 100644 index 40319eb3b..000000000 --- a/vendor/storj.io/common/internal/grpchook/hook.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2019 Storj Labs, Inc. -// See LICENSE for copying information. - -// Package grpchook exists to avoid introducing a dependency to -// grpc unless pb/pbgrpc is imported in other packages. -package grpchook - -import ( - "context" - "crypto/tls" - "errors" - "net" -) - -// ErrNotHooked is returned from funcs when pb/pbgrpc hasn't been imported. -var ErrNotHooked = errors.New("grpc not hooked") - -// HookedErrServerStopped is the grpc.ErrServerStopped when initialized. -var HookedErrServerStopped error - -// IsErrServerStopped returns when err == grpc.ErrServerStopped and -// pb/pbgrpc has been imported. -func IsErrServerStopped(err error) bool { - if HookedErrServerStopped == nil { - return false - } - - return HookedErrServerStopped == err -} - -// HookedInternalFromContext returns grpc peer information from context. -var HookedInternalFromContext func(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error) - -// InternalFromContext returns the peer that was previously associated by NewContext using grpc. -func InternalFromContext(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error) { - if HookedInternalFromContext == nil { - return nil, tls.ConnectionState{}, ErrNotHooked - } - - return HookedInternalFromContext(ctx) -} - -// StatusCode is rpcstatus.Code, however it cannot use it directly without introducing a -// circular dependency. -type StatusCode uint64 - -// HookedErrorWrap is the func to wrap a status code. -var HookedErrorWrap func(code StatusCode, err error) error - -// HookedConvertToStatusCode tries to convert grpc error status to rpcstatus.StatusCode -var HookedConvertToStatusCode func(err error) (StatusCode, bool) diff --git a/vendor/storj.io/common/macaroon/apikey.go b/vendor/storj.io/common/macaroon/apikey.go index 4db01466d..cf9ae22a6 100644 --- a/vendor/storj.io/common/macaroon/apikey.go +++ b/vendor/storj.io/common/macaroon/apikey.go @@ -16,40 +16,40 @@ import ( ) var ( - // Error is a general API Key error + // Error is a general API Key error. 
Error = errs.Class("api key error") - // ErrFormat means that the structural formatting of the API Key is invalid + // ErrFormat means that the structural formatting of the API Key is invalid. ErrFormat = errs.Class("api key format error") - // ErrInvalid means that the API Key is improperly signed + // ErrInvalid means that the API Key is improperly signed. ErrInvalid = errs.Class("api key invalid error") - // ErrUnauthorized means that the API key does not grant the requested permission + // ErrUnauthorized means that the API key does not grant the requested permission. ErrUnauthorized = errs.Class("api key unauthorized error") - // ErrRevoked means the API key has been revoked + // ErrRevoked means the API key has been revoked. ErrRevoked = errs.Class("api key revocation error") mon = monkit.Package() ) -// ActionType specifies the operation type being performed that the Macaroon will validate +// ActionType specifies the operation type being performed that the Macaroon will validate. type ActionType int const ( - // not using iota because these values are persisted in macaroons + // not using iota because these values are persisted in macaroons. _ ActionType = 0 - // ActionRead specifies a read operation + // ActionRead specifies a read operation. ActionRead ActionType = 1 - // ActionWrite specifies a read operation + // ActionWrite specifies a write operation. ActionWrite ActionType = 2 - // ActionList specifies a read operation + // ActionList specifies a list operation. ActionList ActionType = 3 - // ActionDelete specifies a read operation + // ActionDelete specifies a delete operation. ActionDelete ActionType = 4 - // ActionProjectInfo requests project-level information + // ActionProjectInfo requests project-level information. ActionProjectInfo ActionType = 5 ) -// Action specifies the specific operation being performed that the Macaroon will validate +// Action specifies the specific operation being performed that the Macaroon will validate. type Action struct { Op ActionType Bucket []byte @@ -86,8 +86,8 @@ func ParseRawAPIKey(data []byte) (*APIKey, error) { return &APIKey{mac: mac}, nil } -// NewAPIKey generates a brand new unrestricted API key given the provided -// server project secret +// NewAPIKey generates a brand new unrestricted API key given the provided +// server project secret. func NewAPIKey(secret []byte) (*APIKey, error) { mac, err := NewUnrestricted(secret) if err != nil { @@ -134,13 +134,13 @@ func (a *APIKey) Check(ctx context.Context, secret []byte, action Action, revoke // AllowedBuckets stores information about which buckets are // allowed to be accessed, where `Buckets` stores names of buckets that are -// allowed and `All` is a bool that indicates if all buckets are allowed or not +// allowed and `All` is a bool that indicates if all buckets are allowed or not. type AllowedBuckets struct { All bool Buckets map[string]struct{} } -// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation +// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation. func (a *APIKey) GetAllowedBuckets(ctx context.Context, action Action) (allowed AllowedBuckets, err error) { defer mon.Task()(&ctx)(&err) @@ -211,12 +211,12 @@ func (a *APIKey) Tail() []byte { return a.mac.Tail() } -// Serialize serializes the API Key to a string +// Serialize serializes the API Key to a string.
func (a *APIKey) Serialize() string { return base58.CheckEncode(a.mac.Serialize(), 0) } -// SerializeRaw serialize the API Key to raw bytes +// SerializeRaw serialize the API Key to raw bytes. func (a *APIKey) SerializeRaw() []byte { return a.mac.Serialize() } diff --git a/vendor/storj.io/common/macaroon/macaroon.go b/vendor/storj.io/common/macaroon/macaroon.go index 973d0d9e4..acc1f50eb 100644 --- a/vendor/storj.io/common/macaroon/macaroon.go +++ b/vendor/storj.io/common/macaroon/macaroon.go @@ -10,14 +10,14 @@ import ( "crypto/subtle" ) -// Macaroon is a struct that determine contextual caveats and authorization +// Macaroon is a struct that determine contextual caveats and authorization. type Macaroon struct { head []byte caveats [][]byte tail []byte } -// NewUnrestricted creates Macaroon with random Head and generated Tail +// NewUnrestricted creates Macaroon with random Head and generated Tail. func NewUnrestricted(secret []byte) (*Macaroon, error) { head, err := NewSecret() if err != nil { @@ -40,7 +40,7 @@ func sign(secret []byte, data []byte) []byte { return signer.Sum(nil) } -// NewSecret generates cryptographically random 32 bytes +// NewSecret generates cryptographically random 32 bytes. func NewSecret() (secret []byte, err error) { secret = make([]byte, 32) @@ -52,7 +52,7 @@ func NewSecret() (secret []byte, err error) { return secret, nil } -// AddFirstPartyCaveat creates signed macaroon with appended caveat +// AddFirstPartyCaveat creates signed macaroon with appended caveat. func (m *Macaroon) AddFirstPartyCaveat(c []byte) (macaroon *Macaroon, err error) { macaroon = m.Copy() @@ -63,7 +63,7 @@ func (m *Macaroon) AddFirstPartyCaveat(c []byte) (macaroon *Macaroon, err error) } // Validate reconstructs with all caveats from the secret and compares tails, -// returning true if the tails match +// returning true if the tails match. func (m *Macaroon) Validate(secret []byte) (ok bool) { tail := sign(secret, m.head) for _, cav := range m.caveats { @@ -73,7 +73,7 @@ func (m *Macaroon) Validate(secret []byte) (ok bool) { return subtle.ConstantTimeCompare(tail, m.tail) == 1 } -// Tails returns all ancestor tails up to and including the current tail +// Tails returns all ancestor tails up to and including the current tail. func (m *Macaroon) Tails(secret []byte) [][]byte { tails := make([][]byte, 0, len(m.caveats)+1) tail := sign(secret, m.head) @@ -85,7 +85,7 @@ func (m *Macaroon) Tails(secret []byte) [][]byte { return tails } -// Head returns copy of macaroon head +// Head returns copy of macaroon head. func (m *Macaroon) Head() (head []byte) { if len(m.head) == 0 { return nil @@ -93,12 +93,12 @@ func (m *Macaroon) Head() (head []byte) { return append([]byte(nil), m.head...) } -// CaveatLen returns the number of caveats this macaroon has +// CaveatLen returns the number of caveats this macaroon has. func (m *Macaroon) CaveatLen() int { return len(m.caveats) } -// Caveats returns copy of macaroon caveats +// Caveats returns copy of macaroon caveats. func (m *Macaroon) Caveats() (caveats [][]byte) { if len(m.caveats) == 0 { return nil @@ -110,7 +110,7 @@ func (m *Macaroon) Caveats() (caveats [][]byte) { return caveats } -// Tail returns copy of macaroon tail +// Tail returns copy of macaroon tail. func (m *Macaroon) Tail() (tail []byte) { if len(m.tail) == 0 { return nil @@ -118,7 +118,7 @@ func (m *Macaroon) Tail() (tail []byte) { return append([]byte(nil), m.tail...) } -// Copy return copy of macaroon +// Copy return copy of macaroon. 
func (m *Macaroon) Copy() *Macaroon { return &Macaroon{ head: m.Head(), diff --git a/vendor/storj.io/common/macaroon/serialize.go b/vendor/storj.io/common/macaroon/serialize.go index 8bd15065c..cc3e283e7 100644 --- a/vendor/storj.io/common/macaroon/serialize.go +++ b/vendor/storj.io/common/macaroon/serialize.go @@ -27,7 +27,7 @@ type packet struct { data []byte } -// Serialize converts macaroon to binary format +// Serialize converts macaroon to binary format. func (m *Macaroon) Serialize() (data []byte) { // Start data from version int data = append(data, version) @@ -59,7 +59,7 @@ func (m *Macaroon) Serialize() (data []byte) { return data } -// serializePacket converts packet to binary +// serializePacket converts packet to binary. func serializePacket(data []byte, p packet) []byte { data = appendVarint(data, int(p.fieldType)) data = appendVarint(data, len(p.data)) @@ -75,7 +75,7 @@ func appendVarint(data []byte, x int) []byte { return append(data, buf[:n]...) } -// ParseMacaroon converts binary to macaroon +// ParseMacaroon converts binary to macaroon. func ParseMacaroon(data []byte) (_ *Macaroon, err error) { if len(data) < 2 { return nil, errors.New("empty macaroon") @@ -152,7 +152,7 @@ func ParseMacaroon(data []byte) (_ *Macaroon, err error) { return &mac, nil } -// parseSection returns data leftover and packet array +// parseSection returns data leftover and packet array. func parseSection(data []byte) ([]byte, []packet, error) { prevFieldType := fieldType(-1) var packets []packet @@ -176,7 +176,7 @@ func parseSection(data []byte) ([]byte, []packet, error) { } } -// parsePacket returns data leftover and packet +// parsePacket returns data leftover and packet. func parsePacket(data []byte) ([]byte, packet, error) { data, ft, err := parseVarint(data) if err != nil { diff --git a/vendor/storj.io/common/memory/size.go b/vendor/storj.io/common/memory/size.go index b34ff01e9..cca376618 100644 --- a/vendor/storj.io/common/memory/size.go +++ b/vendor/storj.io/common/memory/size.go @@ -10,7 +10,7 @@ import ( "strings" ) -// base 2 and base 10 sizes +// base 2 and base 10 sizes. const ( B Size = 1 << (10 * iota) KiB @@ -28,55 +28,55 @@ const ( EB Size = 1e18 ) -// Size implements flag.Value for collecting memory size in bytes +// Size implements flag.Value for collecting memory size in bytes. type Size int64 -// Int returns bytes size as int +// Int returns bytes size as int. func (size Size) Int() int { return int(size) } -// Int32 returns bytes size as int32 +// Int32 returns bytes size as int32. func (size Size) Int32() int32 { return int32(size) } -// Int64 returns bytes size as int64 +// Int64 returns bytes size as int64. func (size Size) Int64() int64 { return int64(size) } -// Float64 returns bytes size as float64 +// Float64 returns bytes size as float64. func (size Size) Float64() float64 { return float64(size) } -// KiB returns size in kibibytes +// KiB returns size in kibibytes. func (size Size) KiB() float64 { return size.Float64() / KiB.Float64() } -// MiB returns size in mebibytes +// MiB returns size in mebibytes. func (size Size) MiB() float64 { return size.Float64() / MiB.Float64() } -// GiB returns size in gibibytes +// GiB returns size in gibibytes. func (size Size) GiB() float64 { return size.Float64() / GiB.Float64() } -// TiB returns size in tebibytes +// TiB returns size in tebibytes. func (size Size) TiB() float64 { return size.Float64() / TiB.Float64() } -// PiB returns size in pebibytes +// PiB returns size in pebibytes. 
func (size Size) PiB() float64 { return size.Float64() / PiB.Float64() } -// EiB returns size in exbibytes +// EiB returns size in exbibytes. func (size Size) EiB() float64 { return size.Float64() / EiB.Float64() } -// KB returns size in kilobytes +// KB returns size in kilobytes. func (size Size) KB() float64 { return size.Float64() / KB.Float64() } -// MB returns size in megabytes +// MB returns size in megabytes. func (size Size) MB() float64 { return size.Float64() / MB.Float64() } -// GB returns size in gigabytes +// GB returns size in gigabytes. func (size Size) GB() float64 { return size.Float64() / GB.Float64() } -// TB returns size in terabytes +// TB returns size in terabytes. func (size Size) TB() float64 { return size.Float64() / TB.Float64() } -// PB returns size in petabytes +// PB returns size in petabytes. func (size Size) PB() float64 { return size.Float64() / PB.Float64() } -// EB returns size in exabytes +// EB returns size in exabytes. func (size Size) EB() float64 { return size.Float64() / EB.Float64() } // String converts size to a string using base-2 prefixes, unless the number @@ -99,7 +99,7 @@ func countZeros(num, base int64) (count int) { return count } -// Base2String converts size to a string using base-2 prefixes +// Base2String converts size to a string using base-2 prefixes. func (size Size) Base2String() string { if size == 0 { return "0 B" @@ -123,7 +123,7 @@ func (size Size) Base2String() string { return strconv.FormatInt(size.Int64(), 10) + " B" } -// Base10String converts size to a string using base-10 prefixes +// Base10String converts size to a string using base-10 prefixes. func (size Size) Base10String() string { if size == 0 { return "0 B" @@ -158,7 +158,7 @@ func isLetter(b byte) bool { return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z') } -// Set updates value from string +// Set updates value from string. func (size *Size) Set(s string) error { if s == "" { return errors.New("empty size") @@ -219,7 +219,7 @@ func (size *Size) Set(s string) error { return nil } -// Type implements pflag.Value +// Type implements pflag.Value. func (Size) Type() string { return "memory.Size" } // MarshalText returns size as a string. diff --git a/vendor/storj.io/common/memory/sizes.go b/vendor/storj.io/common/memory/sizes.go index 803154aa1..021acbd34 100644 --- a/vendor/storj.io/common/memory/sizes.go +++ b/vendor/storj.io/common/memory/sizes.go @@ -5,13 +5,13 @@ package memory import "strings" -// Sizes implements flag.Value for collecting memory size +// Sizes implements flag.Value for collecting memory size. type Sizes struct { Default []Size Custom []Size } -// Sizes returns the loaded values +// Sizes returns the loaded values. func (sizes Sizes) Sizes() []Size { if len(sizes.Custom) > 0 { return sizes.Custom @@ -19,7 +19,7 @@ func (sizes Sizes) Sizes() []Size { return sizes.Default } -// String converts values to a string +// String converts values to a string. func (sizes Sizes) String() string { sz := sizes.Sizes() xs := make([]string, len(sz)) @@ -29,7 +29,7 @@ func (sizes Sizes) String() string { return strings.Join(xs, " ") } -// Set adds values from byte values +// Set adds values from byte values. 
func (sizes *Sizes) Set(s string) error { for _, x := range strings.Fields(s) { var size Size diff --git a/vendor/storj.io/common/memory/string.go b/vendor/storj.io/common/memory/string.go index 25ae05a8c..e4a4d561f 100644 --- a/vendor/storj.io/common/memory/string.go +++ b/vendor/storj.io/common/memory/string.go @@ -3,12 +3,12 @@ package memory -// FormatBytes converts number of bytes to appropriately sized string +// FormatBytes converts number of bytes to appropriately sized string. func FormatBytes(bytes int64) string { return Size(bytes).String() } -// ParseString converts string to number of bytes +// ParseString converts string to number of bytes. func ParseString(s string) (int64, error) { var size Size err := size.Set(s) diff --git a/vendor/storj.io/common/netutil/tracking.go b/vendor/storj.io/common/netutil/tracking.go index 1fb12cd60..726b74a90 100644 --- a/vendor/storj.io/common/netutil/tracking.go +++ b/vendor/storj.io/common/netutil/tracking.go @@ -9,7 +9,7 @@ import ( ) // closeTrackingConn wraps a net.Conn and keeps track of if it was closed -// or if it was leaked (and closes it if it was leaked.) +// or if it was leaked (and closes it if it was leaked). type closeTrackingConn struct { net.Conn } diff --git a/vendor/storj.io/common/paths/path.go b/vendor/storj.io/common/paths/path.go index 6d60dcde7..c97f2651e 100644 --- a/vendor/storj.io/common/paths/path.go +++ b/vendor/storj.io/common/paths/path.go @@ -61,7 +61,7 @@ func (path Unencrypted) Iterator() Iterator { return NewIterator(path.raw) } -// Less returns true if 'path' should be sorted earlier than 'other' +// Less returns true if 'path' should be sorted earlier than 'other'. func (path Unencrypted) Less(other Unencrypted) bool { return path.raw < other.raw } @@ -104,7 +104,7 @@ func (path Encrypted) Iterator() Iterator { return NewIterator(path.raw) } -// Less returns true if 'path' should be sorted earlier than 'other' +// Less returns true if 'path' should be sorted earlier than 'other'. func (path Encrypted) Less(other Encrypted) bool { return path.raw < other.raw } diff --git a/vendor/storj.io/common/pb/datarepair.pb.go b/vendor/storj.io/common/pb/datarepair.pb.go index 7f31f2f0c..fb508619f 100644 --- a/vendor/storj.io/common/pb/datarepair.pb.go +++ b/vendor/storj.io/common/pb/datarepair.pb.go @@ -23,7 +23,7 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// InjuredSegment is the queue item used for the data repair queue +// InjuredSegment is the queue item used for the data repair queue. type InjuredSegment struct { Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` LostPieces []int32 `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"` diff --git a/vendor/storj.io/common/pb/datarepair.proto b/vendor/storj.io/common/pb/datarepair.proto index b2ddad246..ad6697081 100644 --- a/vendor/storj.io/common/pb/datarepair.proto +++ b/vendor/storj.io/common/pb/datarepair.proto @@ -8,7 +8,7 @@ import "google/protobuf/timestamp.proto"; package repair; -// InjuredSegment is the queue item used for the data repair queue +// InjuredSegment is the queue item used for the data repair queue. 
message InjuredSegment { bytes path = 1; repeated int32 lost_pieces = 2; diff --git a/vendor/storj.io/common/pb/gracefulexit.pb.go b/vendor/storj.io/common/pb/gracefulexit.pb.go index 35e598925..82bfae09b 100644 --- a/vendor/storj.io/common/pb/gracefulexit.pb.go +++ b/vendor/storj.io/common/pb/gracefulexit.pb.go @@ -146,7 +146,7 @@ func (m *InitiateGracefulExitRequest) XXX_DiscardUnknown() { var xxx_messageInfo_InitiateGracefulExitRequest proto.InternalMessageInfo -// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit +// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit. type NonExitingSatellite struct { NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` DomainName string `protobuf:"bytes,2,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"` diff --git a/vendor/storj.io/common/pb/gracefulexit.proto b/vendor/storj.io/common/pb/gracefulexit.proto index 218a85567..a827a360a 100644 --- a/vendor/storj.io/common/pb/gracefulexit.proto +++ b/vendor/storj.io/common/pb/gracefulexit.proto @@ -11,7 +11,7 @@ import "orders.proto"; package gracefulexit; -// NodeGracefulExit is a private service on storagenodes +// NodeGracefulExit is a private service on storagenodes. service NodeGracefulExit { // GetSatellitesList returns a list of satellites that the storagenode has not exited. rpc GetNonExitingSatellites(GetNonExitingSatellitesRequest) returns (GetNonExitingSatellitesResponse); @@ -27,7 +27,7 @@ message InitiateGracefulExitRequest { bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; } -// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit +// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit. message NonExitingSatellite { bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; string domain_name = 2; @@ -53,7 +53,7 @@ message ExitProgress { } service SatelliteGracefulExit { - // Process is called by storage nodes to initiate the graceful exit, get pieces to transfer, and receive exit status. + // Process is called by storage nodes to initiate the graceful exit, get pieces to transfer, and receive exit status. rpc Process(stream StorageNodeMessage) returns (stream SatelliteMessage); } diff --git a/vendor/storj.io/common/pb/inspector.pb.go b/vendor/storj.io/common/pb/inspector.pb.go index 0b179f632..f606cc575 100644 --- a/vendor/storj.io/common/pb/inspector.pb.go +++ b/vendor/storj.io/common/pb/inspector.pb.go @@ -26,7 +26,6 @@ var _ = time.Kitchen // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// ListSegments type ListIrreparableSegmentsRequest struct { Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` LastSeenSegmentPath []byte `protobuf:"bytes,2,opt,name=last_seen_segment_path,json=lastSeenSegmentPath,proto3" json:"last_seen_segment_path,omitempty"` @@ -181,7 +180,6 @@ func (m *ListIrreparableSegmentsResponse) GetSegments() []*IrreparableSegment { return nil } -// CountNodes type CountNodesResponse struct { Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` diff --git a/vendor/storj.io/common/pb/inspector.proto b/vendor/storj.io/common/pb/inspector.proto index df5977b3a..f393c1606 100644 --- a/vendor/storj.io/common/pb/inspector.proto +++ b/vendor/storj.io/common/pb/inspector.proto @@ -37,7 +37,6 @@ service HealthInspector { rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {} } -// ListSegments message ListIrreparableSegmentsRequest { int32 limit = 1; bytes last_seen_segment_path = 2; @@ -55,7 +54,6 @@ message ListIrreparableSegmentsResponse { repeated IrreparableSegment segments = 1; } -// CountNodes message CountNodesResponse { int64 count = 1; } diff --git a/vendor/storj.io/common/pb/meta.pb.go b/vendor/storj.io/common/pb/meta.pb.go index 143cd07e4..824c6d005 100644 --- a/vendor/storj.io/common/pb/meta.pb.go +++ b/vendor/storj.io/common/pb/meta.pb.go @@ -21,7 +21,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// SerializableMeta is the object metadata that will be stored serialized +// SerializableMeta is the object metadata that will be stored serialized. type SerializableMeta struct { ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` UserDefined map[string]string `protobuf:"bytes,2,rep,name=user_defined,json=userDefined,proto3" json:"user_defined,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` diff --git a/vendor/storj.io/common/pb/meta.proto b/vendor/storj.io/common/pb/meta.proto index 4737339d3..f3813be56 100644 --- a/vendor/storj.io/common/pb/meta.proto +++ b/vendor/storj.io/common/pb/meta.proto @@ -6,7 +6,7 @@ option go_package = "storj.io/common/pb"; package objects; -// SerializableMeta is the object metadata that will be stored serialized +// SerializableMeta is the object metadata that will be stored serialized. 
message SerializableMeta { string content_type = 1; map user_defined = 2; diff --git a/vendor/storj.io/common/pb/metainfo.pb.go b/vendor/storj.io/common/pb/metainfo.pb.go index 6544f6166..3f86cee1d 100644 --- a/vendor/storj.io/common/pb/metainfo.pb.go +++ b/vendor/storj.io/common/pb/metainfo.pb.go @@ -2581,7 +2581,6 @@ func (m *ObjectFinishDeleteResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ObjectFinishDeleteResponse proto.InternalMessageInfo -// only for satellite use type SatStreamID struct { Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` diff --git a/vendor/storj.io/common/pb/metainfo.proto b/vendor/storj.io/common/pb/metainfo.proto index 366a261c1..ea143af29 100644 --- a/vendor/storj.io/common/pb/metainfo.proto +++ b/vendor/storj.io/common/pb/metainfo.proto @@ -17,7 +17,7 @@ import "orders.proto"; // * If someone will add stream_id or segment_id to existing or new message in this protobuf // then Batch method (satellite/metainfo/batch.go) needs to be updated as well -// Metainfo it's a satellite RPC service +// Metainfo it's a satellite RPC service. service Metainfo { // Bucket rpc CreateBucket(BucketCreateRequest) returns (BucketCreateResponse); @@ -392,7 +392,10 @@ message ObjectFinishDeleteRequest { message ObjectFinishDeleteResponse { } -// only for satellite use +// +// Only for satellite use +// + message SatStreamID { bytes bucket = 1; bytes encrypted_path = 2; diff --git a/vendor/storj.io/common/pb/node.pb.go b/vendor/storj.io/common/pb/node.pb.go index 0d4edd6d8..e6f39f4fc 100644 --- a/vendor/storj.io/common/pb/node.pb.go +++ b/vendor/storj.io/common/pb/node.pb.go @@ -23,7 +23,7 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// NodeType is an enum of possible node types +// NodeType is an enum of possible node types. type NodeType int32 const ( @@ -58,7 +58,7 @@ func (NodeType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0c843d59d2d938e7, []int{0} } -// NodeTransport is an enum of possible transports for the overlay network +// NodeTransport is an enum of possible transports for the overlay network. type NodeTransport int32 const ( @@ -81,9 +81,8 @@ func (NodeTransport) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0c843d59d2d938e7, []int{1} } -// TODO move statdb.Update() stuff out of here -// Node represents a node in the overlay network -// Node is info for a updating a single storagenode, used in the Update rpc calls +// Node represents a node in the overlay network. +// Node is info for a updating a single storagenode, used in the Update rpc calls. type Node struct { Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"` Address *NodeAddress `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` @@ -132,7 +131,7 @@ func (m *Node) GetDeprecatedLastIp() string { return "" } -// NodeAddress contains the information needed to communicate with a node on the network +// NodeAddress contains the information needed to communicate with a node on the network. 
type NodeAddress struct { Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=node.NodeTransport" json:"transport,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` @@ -179,7 +178,7 @@ func (m *NodeAddress) GetAddress() string { return "" } -// NodeOperator contains info about the storage node operator +// NodeOperator contains info about the storage node operator. type NodeOperator struct { Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` @@ -226,7 +225,7 @@ func (m *NodeOperator) GetWallet() string { return "" } -// NodeCapacity contains all relevant data about a nodes ability to store data +// NodeCapacity contains all relevant data about a nodes ability to store data. type NodeCapacity struct { FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` // Deprecated: Do not use. FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` @@ -274,7 +273,7 @@ func (m *NodeCapacity) GetFreeDisk() int64 { return 0 } -// Deprecated: use NodeOperator instead +// Deprecated: use NodeOperator instead. type NodeMetadata struct { Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` @@ -321,7 +320,7 @@ func (m *NodeMetadata) GetWallet() string { return "" } -// Deprecated: use NodeCapacity instead +// Deprecated: use NodeCapacity instead. type NodeRestrictions struct { FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` @@ -368,7 +367,7 @@ func (m *NodeRestrictions) GetFreeDisk() int64 { return 0 } -// NodeVersion contains +// NodeVersion contains version information about a node. type NodeVersion struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` diff --git a/vendor/storj.io/common/pb/node.proto b/vendor/storj.io/common/pb/node.proto index e6ec958e1..c2c860680 100644 --- a/vendor/storj.io/common/pb/node.proto +++ b/vendor/storj.io/common/pb/node.proto @@ -9,9 +9,8 @@ package node; import "gogo.proto"; import "google/protobuf/timestamp.proto"; -// TODO move statdb.Update() stuff out of here -// Node represents a node in the overlay network -// Node is info for a updating a single storagenode, used in the Update rpc calls +// Node represents a node in the overlay network. +// Node is info for a updating a single storagenode, used in the Update rpc calls. message Node { bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; NodeAddress address = 2; @@ -20,7 +19,7 @@ message Node { reserved "type", "restrictions", "reputation", "metadata", "latency_list", "audit_success", "is_up", "update_latency", "update_audit_success", "update_uptime", "version"; } -// NodeType is an enum of possible node types +// NodeType is an enum of possible node types. 
enum NodeType { INVALID = 0; SATELLITE = 1; @@ -29,42 +28,42 @@ enum NodeType { BOOTSTRAP = 4 [deprecated=true]; } -// NodeAddress contains the information needed to communicate with a node on the network +// NodeAddress contains the information needed to communicate with a node on the network. message NodeAddress { NodeTransport transport = 1; string address = 2; } -// NodeTransport is an enum of possible transports for the overlay network +// NodeTransport is an enum of possible transports for the overlay network. enum NodeTransport { TCP_TLS_GRPC = 0; } -// NodeOperator contains info about the storage node operator +// NodeOperator contains info about the storage node operator. message NodeOperator { string email = 1; string wallet = 2; } -// NodeCapacity contains all relevant data about a nodes ability to store data +// NodeCapacity contains all relevant data about a nodes ability to store data. message NodeCapacity { int64 free_bandwidth = 1 [deprecated=true]; int64 free_disk = 2; } -// Deprecated: use NodeOperator instead +// Deprecated: use NodeOperator instead. message NodeMetadata { string email = 1; string wallet = 2; } -// Deprecated: use NodeCapacity instead +// Deprecated: use NodeCapacity instead. message NodeRestrictions { int64 free_bandwidth = 1; int64 free_disk = 2; } -// NodeVersion contains +// NodeVersion contains version information about a node. message NodeVersion { string version = 1; // must be semver formatted string commit_hash = 2; diff --git a/vendor/storj.io/common/pb/nodestats.pb.go b/vendor/storj.io/common/pb/nodestats.pb.go index 1c447d8e1..d8d269f73 100644 --- a/vendor/storj.io/common/pb/nodestats.pb.go +++ b/vendor/storj.io/common/pb/nodestats.pb.go @@ -27,14 +27,17 @@ var _ = time.Kitchen const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ReputationStats struct { - TotalCount int64 `protobuf:"varint,1,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty"` - SuccessCount int64 `protobuf:"varint,2,opt,name=success_count,json=successCount,proto3" json:"success_count,omitempty"` - ReputationAlpha float64 `protobuf:"fixed64,3,opt,name=reputation_alpha,json=reputationAlpha,proto3" json:"reputation_alpha,omitempty"` - ReputationBeta float64 `protobuf:"fixed64,4,opt,name=reputation_beta,json=reputationBeta,proto3" json:"reputation_beta,omitempty"` - ReputationScore float64 `protobuf:"fixed64,5,opt,name=reputation_score,json=reputationScore,proto3" json:"reputation_score,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + TotalCount int64 `protobuf:"varint,1,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty"` + SuccessCount int64 `protobuf:"varint,2,opt,name=success_count,json=successCount,proto3" json:"success_count,omitempty"` + ReputationAlpha float64 `protobuf:"fixed64,3,opt,name=reputation_alpha,json=reputationAlpha,proto3" json:"reputation_alpha,omitempty"` + ReputationBeta float64 `protobuf:"fixed64,4,opt,name=reputation_beta,json=reputationBeta,proto3" json:"reputation_beta,omitempty"` + ReputationScore float64 `protobuf:"fixed64,5,opt,name=reputation_score,json=reputationScore,proto3" json:"reputation_score,omitempty"` + UnknownReputationAlpha float64 `protobuf:"fixed64,6,opt,name=unknown_reputation_alpha,json=unknownReputationAlpha,proto3" json:"unknown_reputation_alpha,omitempty"` + UnknownReputationBeta float64 `protobuf:"fixed64,7,opt,name=unknown_reputation_beta,json=unknownReputationBeta,proto3" 
json:"unknown_reputation_beta,omitempty"` + UnknownReputationScore float64 `protobuf:"fixed64,8,opt,name=unknown_reputation_score,json=unknownReputationScore,proto3" json:"unknown_reputation_score,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ReputationStats) Reset() { *m = ReputationStats{} } @@ -96,6 +99,27 @@ func (m *ReputationStats) GetReputationScore() float64 { return 0 } +func (m *ReputationStats) GetUnknownReputationAlpha() float64 { + if m != nil { + return m.UnknownReputationAlpha + } + return 0 +} + +func (m *ReputationStats) GetUnknownReputationBeta() float64 { + if m != nil { + return m.UnknownReputationBeta + } + return 0 +} + +func (m *ReputationStats) GetUnknownReputationScore() float64 { + if m != nil { + return m.UnknownReputationScore + } + return 0 +} + type GetStatsRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -435,51 +459,53 @@ func init() { func init() { proto.RegisterFile("nodestats.proto", fileDescriptor_e0b184ee117142aa) } var fileDescriptor_e0b184ee117142aa = []byte{ - // 691 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, - 0x18, 0xbd, 0x76, 0xb8, 0x5c, 0xf2, 0x25, 0x10, 0x18, 0xc2, 0x55, 0x6e, 0xae, 0xd4, 0xa0, 0x50, - 0x89, 0x74, 0x93, 0xa8, 0x29, 0x8b, 0x4a, 0x55, 0x17, 0x04, 0xa4, 0x96, 0x45, 0x7f, 0xe4, 0xd0, - 0x4d, 0x17, 0xb5, 0x26, 0x9e, 0x0f, 0x33, 0x90, 0x78, 0x8c, 0x67, 0xdc, 0xaa, 0xcb, 0x6e, 0xbb, - 0xea, 0x1b, 0xf4, 0x75, 0xfa, 0x04, 0x5d, 0x74, 0x01, 0x7d, 0x94, 0x6a, 0x66, 0x9c, 0x3f, 0x03, - 0x25, 0x5d, 0xe6, 0x7c, 0xe7, 0x9c, 0x8c, 0xcf, 0x99, 0x6f, 0xa0, 0x12, 0x09, 0x86, 0x52, 0x51, - 0x25, 0xdb, 0x71, 0x22, 0x94, 0x20, 0xc5, 0x09, 0x50, 0x87, 0x50, 0x84, 0xc2, 0xc2, 0xf5, 0x46, - 0x28, 0x44, 0x38, 0xc4, 0x8e, 0xf9, 0x35, 0x48, 0x4f, 0x3a, 0x8a, 0x8f, 0x34, 0x6d, 0x14, 0x5b, - 0x42, 0xf3, 0xbb, 0x03, 0x15, 0x0f, 0xe3, 0x54, 0x51, 0xc5, 0x45, 0xd4, 0xd7, 0x06, 0xa4, 0x01, - 0x25, 0x25, 0x14, 0x1d, 0xfa, 0x81, 0x48, 0x23, 0x55, 0x73, 0xb6, 0x9d, 0x56, 0xc1, 0x03, 0x03, - 0x1d, 0x68, 0x84, 0xec, 0xc0, 0xaa, 0x4c, 0x83, 0x00, 0xa5, 0xcc, 0x28, 0xae, 0xa1, 0x94, 0x33, - 0xd0, 0x92, 0x1e, 0xc0, 0x7a, 0x32, 0x31, 0xf6, 0xe9, 0x30, 0x3e, 0xa5, 0xb5, 0xc2, 0xb6, 0xd3, - 0x72, 0xbc, 0xca, 0x14, 0xdf, 0xd7, 0x30, 0xd9, 0x85, 0x19, 0xc8, 0x1f, 0xa0, 0xa2, 0xb5, 0x25, - 0xc3, 0x5c, 0x9b, 0xc2, 0x3d, 0x54, 0x34, 0xe7, 0x29, 0x03, 0x91, 0x60, 0xed, 0xef, 0xbc, 0x67, - 0x5f, 0xc3, 0xcd, 0x0d, 0xa8, 0x3c, 0x43, 0x65, 0x3e, 0xc8, 0xc3, 0x8b, 0x14, 0xa5, 0x6a, 0x5e, - 0xb9, 0xb0, 0x3e, 0xc5, 0x64, 0x2c, 0x22, 0x89, 0xe4, 0x29, 0x94, 0xd3, 0x58, 0xa7, 0xe2, 0x07, - 0xa7, 0x18, 0x9c, 0x9b, 0xaf, 0x2d, 0x75, 0xeb, 0xed, 0x69, 0xc0, 0xb9, 0x78, 0xbc, 0x92, 0xe5, - 0x1f, 0x68, 0x3a, 0x79, 0x02, 0x25, 0x9a, 0x32, 0xae, 0x32, 0xb5, 0x7b, 0xa7, 0x1a, 0x0c, 0xdd, - 0x8a, 0x9f, 0x43, 0x99, 0x71, 0x79, 0x91, 0xd2, 0x21, 0x3f, 0xe1, 0xc8, 0x4c, 0x3c, 0x5a, 0x6d, - 0x4b, 0x6b, 0x8f, 0x4b, 0x6b, 0x1f, 0x8f, 0x4b, 0xeb, 0xad, 0x7c, 0xbb, 0x6c, 0x38, 0x5f, 0xae, - 0x1a, 0x8e, 0x37, 0xa7, 0x24, 0x3d, 0x28, 0xca, 0x54, 0xc6, 0x18, 0x31, 0x64, 0x26, 0xbb, 0x45, - 0x6d, 0xa6, 0x32, 0xb2, 0x0f, 0xc5, 0x33, 0xc1, 0x23, 0x64, 0x3e, 0x55, 0x26, 0xd5, 0xbb, 0x3d, - 0xfe, 0x32, 0x1e, 0x2b, 0x56, 0xb6, 0xaf, 0x9a, 0x9f, 0x1d, 0xa8, 0x1d, 0x52, 0x3e, 0xfc, 0xd8, - 0x57, 0x22, 0xa1, 0x21, 0xbe, 0x91, 0x34, 0xc4, 0x2c, 0x7e, 0xf2, 0x18, 0x96, 0x4e, 0x12, 0x31, - 
0x9a, 0x24, 0xbc, 0x88, 0xb5, 0x51, 0x90, 0x3d, 0x70, 0x95, 0x98, 0x64, 0xbb, 0x88, 0xce, 0x55, - 0xa2, 0xf9, 0xd5, 0x85, 0xff, 0x6e, 0x38, 0x4c, 0xd6, 0xfb, 0x2e, 0xfc, 0xa3, 0x4b, 0xf2, 0x39, - 0x33, 0x07, 0x2a, 0xf7, 0xd6, 0xb4, 0xf8, 0xc7, 0x65, 0x63, 0xf9, 0xa5, 0x60, 0x78, 0x74, 0xe8, - 0x2d, 0xeb, 0xf1, 0x11, 0x23, 0x14, 0x36, 0x99, 0x76, 0xf1, 0xa5, 0xb5, 0xf1, 0x53, 0xed, 0x53, - 0x73, 0xb7, 0x0b, 0xad, 0x52, 0xf7, 0xe1, 0x4c, 0xd3, 0xb7, 0xfe, 0x57, 0x7b, 0x0e, 0xdc, 0x60, - 0x79, 0x5e, 0xfd, 0x3d, 0x94, 0x67, 0x7f, 0x93, 0x26, 0xac, 0x52, 0xe5, 0x27, 0x28, 0x95, 0x6f, - 0xb6, 0xce, 0x9c, 0xd0, 0xf1, 0x4a, 0x54, 0x79, 0x28, 0xd5, 0xb1, 0x86, 0x74, 0xe3, 0x93, 0x5d, - 0xfe, 0xa3, 0x68, 0xa6, 0xb2, 0xe6, 0x16, 0x6c, 0xbe, 0x4e, 0x78, 0xc0, 0xa3, 0xf0, 0x85, 0x60, - 0x38, 0x1c, 0xef, 0xc9, 0x4f, 0x07, 0xaa, 0xf3, 0x78, 0x96, 0xd9, 0x1e, 0xfc, 0x8b, 0x61, 0xa2, - 0xd7, 0x7e, 0x40, 0x23, 0xf6, 0x81, 0x33, 0x75, 0xea, 0xc7, 0x09, 0x0f, 0x30, 0x7b, 0x23, 0xaa, - 0x76, 0xda, 0x1b, 0x0f, 0xb5, 0x89, 0x51, 0x25, 0x18, 0x53, 0x9e, 0x5c, 0x53, 0xd9, 0x67, 0xa3, - 0x6a, 0xa7, 0x39, 0x55, 0x0b, 0xd6, 0x19, 0x97, 0xe7, 0xbe, 0x8c, 0x69, 0x80, 0x19, 0xbf, 0x60, - 0xf8, 0x6b, 0x1a, 0xef, 0x6b, 0xd8, 0x32, 0xbb, 0xb0, 0x65, 0x57, 0x30, 0x6f, 0xbf, 0x64, 0xe8, - 0x9b, 0x66, 0x38, 0xef, 0xde, 0xfd, 0xe4, 0x42, 0x51, 0xf7, 0x6c, 0x1f, 0xbc, 0x03, 0x58, 0x19, - 0xbf, 0x0b, 0x64, 0x76, 0x77, 0x73, 0x0f, 0x48, 0xfd, 0xff, 0x1b, 0x67, 0x59, 0x38, 0xef, 0x60, - 0xe3, 0xda, 0x0d, 0x20, 0x3b, 0xbf, 0xbf, 0x1f, 0xd6, 0xf6, 0xfe, 0x22, 0x97, 0x88, 0xbc, 0x82, - 0xf2, 0x6c, 0x29, 0xe4, 0xde, 0x8c, 0xea, 0x86, 0x16, 0xeb, 0x8d, 0x5b, 0xe7, 0xd6, 0xb0, 0x57, - 0x7d, 0x4b, 0xf4, 0x95, 0x3e, 0x6b, 0x73, 0xd1, 0x09, 0xc4, 0x68, 0x24, 0xa2, 0x4e, 0x3c, 0x18, - 0x2c, 0x9b, 0xcb, 0xf3, 0xe8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x8d, 0x3b, 0x4a, 0x62, - 0x06, 0x00, 0x00, + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xec, 0xf4, 0x4b, 0x93, 0x9b, 0xb4, 0x69, 0xa7, 0x69, 0x09, 0x41, 0x22, 0x55, 0x8a, + 0xd4, 0xb0, 0x49, 0x44, 0xa8, 0x50, 0x25, 0xc4, 0xa2, 0x69, 0x25, 0xe8, 0x82, 0x1f, 0x39, 0x65, + 0xc3, 0x02, 0x6b, 0xe2, 0x99, 0xba, 0xd3, 0x26, 0x1e, 0xd7, 0x33, 0xa6, 0x62, 0x09, 0x4b, 0x56, + 0xbc, 0x01, 0xaf, 0xc3, 0x33, 0xb0, 0x68, 0x79, 0x14, 0x34, 0x33, 0xce, 0x9f, 0x93, 0xd2, 0xb0, + 0xf4, 0xb9, 0xe7, 0x1c, 0x5f, 0xdd, 0x73, 0xe7, 0x42, 0x29, 0xe0, 0x84, 0x0a, 0x89, 0xa5, 0x68, + 0x86, 0x11, 0x97, 0x1c, 0xe5, 0x47, 0x40, 0x15, 0x7c, 0xee, 0x73, 0x03, 0x57, 0x6b, 0x3e, 0xe7, + 0x7e, 0x9f, 0xb6, 0xf4, 0x57, 0x2f, 0x3e, 0x6d, 0x49, 0x36, 0x50, 0xb4, 0x41, 0x68, 0x08, 0xf5, + 0xaf, 0x19, 0x28, 0x39, 0x34, 0x8c, 0x25, 0x96, 0x8c, 0x07, 0x5d, 0x65, 0x80, 0x6a, 0x50, 0x90, + 0x5c, 0xe2, 0xbe, 0xeb, 0xf1, 0x38, 0x90, 0x15, 0x6b, 0xdb, 0x6a, 0x64, 0x1c, 0xd0, 0xd0, 0xa1, + 0x42, 0xd0, 0x0e, 0xac, 0x88, 0xd8, 0xf3, 0xa8, 0x10, 0x09, 0xc5, 0xd6, 0x94, 0x62, 0x02, 0x1a, + 0xd2, 0x63, 0x58, 0x8b, 0x46, 0xc6, 0x2e, 0xee, 0x87, 0x67, 0xb8, 0x92, 0xd9, 0xb6, 0x1a, 0x96, + 0x53, 0x1a, 0xe3, 0x07, 0x0a, 0x46, 0xbb, 0x30, 0x01, 0xb9, 0x3d, 0x2a, 0x71, 0x65, 0x49, 0x33, + 0x57, 0xc7, 0x70, 0x87, 0x4a, 0x9c, 0xf2, 0x14, 0x1e, 0x8f, 0x68, 0xe5, 0xff, 0xb4, 0x67, 0x57, + 0xc1, 0x68, 0x1f, 0x2a, 0x71, 0x70, 0x11, 0xf0, 0xab, 0xc0, 0x9d, 0x69, 0x23, 0xab, 0x25, 0x5b, + 0x49, 0xdd, 0x49, 0x75, 0xf3, 0x0c, 0xee, 0xcd, 0x51, 0xea, 0xae, 0x96, 0xb5, 0x70, 0x73, 0x46, + 0xa8, 0x9b, 0x9b, 0xff, 0x47, 0xd3, 0x64, 0xee, 0x96, 
0x3f, 0xea, 0x5e, 0xeb, 0xeb, 0x50, 0x7a, + 0x49, 0xa5, 0x1e, 0xbe, 0x43, 0x2f, 0x63, 0x2a, 0x64, 0xfd, 0xc6, 0x86, 0xb5, 0x31, 0x26, 0x42, + 0x1e, 0x08, 0x8a, 0x5e, 0x40, 0x31, 0x0e, 0x55, 0x82, 0xae, 0x77, 0x46, 0xbd, 0x0b, 0x9d, 0x4c, + 0xa1, 0x5d, 0x6d, 0x8e, 0x97, 0x21, 0x15, 0xa5, 0x53, 0x30, 0xfc, 0x43, 0x45, 0x47, 0xcf, 0xa1, + 0x80, 0x63, 0xc2, 0x64, 0xa2, 0xb6, 0xef, 0x54, 0x83, 0xa6, 0x1b, 0xf1, 0x2b, 0x28, 0x12, 0x26, + 0x2e, 0x63, 0xdc, 0x67, 0xa7, 0x8c, 0x12, 0x1d, 0xa5, 0x52, 0x9b, 0x05, 0x6b, 0x0e, 0x17, 0xac, + 0x79, 0x32, 0x5c, 0xb0, 0x4e, 0xee, 0xe7, 0x75, 0xcd, 0xfa, 0x7e, 0x53, 0xb3, 0x9c, 0x29, 0x25, + 0xea, 0x40, 0x5e, 0xc4, 0x22, 0xa4, 0x01, 0xa1, 0x44, 0xe7, 0xbc, 0xa8, 0xcd, 0x58, 0x86, 0x0e, + 0x20, 0x7f, 0xce, 0x59, 0x40, 0x89, 0x8b, 0xa5, 0xde, 0x80, 0xbb, 0x3d, 0xfe, 0xd3, 0x1e, 0x39, + 0x23, 0x3b, 0x90, 0xf5, 0x6f, 0x16, 0x54, 0x8e, 0x30, 0xeb, 0x7f, 0xee, 0x4a, 0x1e, 0x61, 0x9f, + 0xbe, 0x17, 0xd8, 0xa7, 0xc9, 0xf8, 0xd1, 0x3e, 0x2c, 0x9d, 0x46, 0x7c, 0x30, 0x9a, 0xf0, 0x22, + 0xd6, 0x5a, 0x81, 0xf6, 0xc0, 0x96, 0x7c, 0x34, 0xdb, 0x45, 0x74, 0xb6, 0xe4, 0xf5, 0x1f, 0x36, + 0xdc, 0x9f, 0xd3, 0x4c, 0x92, 0xfb, 0x2e, 0x2c, 0xab, 0x90, 0x5c, 0x46, 0x74, 0x43, 0xc5, 0xce, + 0xaa, 0x12, 0xff, 0xba, 0xae, 0x65, 0xdf, 0x70, 0x42, 0x8f, 0x8f, 0x9c, 0xac, 0x2a, 0x1f, 0x13, + 0x84, 0x61, 0x83, 0x28, 0x17, 0x57, 0x18, 0x1b, 0x37, 0x56, 0x3e, 0x15, 0x7b, 0x3b, 0xd3, 0x28, + 0xb4, 0x9f, 0x4c, 0x24, 0x7d, 0xeb, 0xbf, 0x9a, 0x53, 0xe0, 0x3a, 0x49, 0xf3, 0xaa, 0x9f, 0xa0, + 0x38, 0xf9, 0x8d, 0xea, 0xb0, 0x82, 0xa5, 0x1b, 0x51, 0x21, 0x5d, 0x7d, 0x21, 0x74, 0x87, 0x96, + 0x53, 0xc0, 0xd2, 0xa1, 0x42, 0x9e, 0x28, 0x48, 0x25, 0x3e, 0xba, 0x3b, 0xff, 0x34, 0x9a, 0xb1, + 0xac, 0xbe, 0x09, 0x1b, 0xef, 0x22, 0xe6, 0xb1, 0xc0, 0x7f, 0xcd, 0x09, 0xed, 0x0f, 0xdf, 0xc9, + 0x6f, 0x0b, 0xca, 0xd3, 0x78, 0x32, 0xb3, 0x3d, 0xd8, 0xa2, 0x7e, 0xa4, 0x4e, 0x54, 0x0f, 0x07, + 0xe4, 0x8a, 0x11, 0x79, 0xe6, 0x86, 0x11, 0xf3, 0x68, 0x72, 0xcf, 0xca, 0xa6, 0xda, 0x19, 0x16, + 0x95, 0x89, 0x56, 0x45, 0x34, 0xc4, 0x2c, 0x9a, 0x51, 0x99, 0x13, 0x57, 0x36, 0xd5, 0x94, 0xaa, + 0x01, 0x6b, 0x84, 0x89, 0x0b, 0x57, 0x84, 0xd8, 0xa3, 0x09, 0x3f, 0xa3, 0xf9, 0xab, 0x0a, 0xef, + 0x2a, 0xd8, 0x30, 0xdb, 0xb0, 0x69, 0x9e, 0x60, 0xda, 0x7e, 0x49, 0xd3, 0x37, 0x74, 0x71, 0xda, + 0xbd, 0xfd, 0xc5, 0x86, 0xbc, 0xca, 0xd9, 0x1c, 0xe7, 0x43, 0xc8, 0x0d, 0xef, 0x02, 0x9a, 0x7c, + 0xbb, 0xa9, 0x03, 0x52, 0x7d, 0x30, 0xb7, 0x96, 0x0c, 0xe7, 0x23, 0xac, 0xcf, 0x6c, 0x00, 0xda, + 0xf9, 0xfb, 0x7e, 0x18, 0xdb, 0x47, 0x8b, 0x2c, 0x11, 0x7a, 0x0b, 0xc5, 0xc9, 0x50, 0xd0, 0xc3, + 0x09, 0xd5, 0x9c, 0x14, 0xab, 0xb5, 0x5b, 0xeb, 0xc6, 0xb0, 0x53, 0xfe, 0x80, 0xd4, 0x4a, 0x9f, + 0x37, 0x19, 0x6f, 0x79, 0x7c, 0x30, 0xe0, 0x41, 0x2b, 0xec, 0xf5, 0xb2, 0x7a, 0x79, 0x9e, 0xfe, + 0x09, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xa2, 0xc8, 0x62, 0x0e, 0x07, 0x00, 0x00, } // --- DRPC BEGIN --- diff --git a/vendor/storj.io/common/pb/nodestats.proto b/vendor/storj.io/common/pb/nodestats.proto index 6b8fb9e06..90513b7c8 100644 --- a/vendor/storj.io/common/pb/nodestats.proto +++ b/vendor/storj.io/common/pb/nodestats.proto @@ -21,6 +21,9 @@ message ReputationStats { double reputation_alpha = 3; double reputation_beta = 4; double reputation_score = 5; + double unknown_reputation_alpha = 6; + double unknown_reputation_beta = 7; + double unknown_reputation_score = 8; } message GetStatsRequest {} diff --git a/vendor/storj.io/common/pb/orders.pb.go b/vendor/storj.io/common/pb/orders.pb.go index a483f0fbe..5d59ed080 100644 --- 
a/vendor/storj.io/common/pb/orders.pb.go +++ b/vendor/storj.io/common/pb/orders.pb.go @@ -26,7 +26,7 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// PieceAction is an enumeration of all possible executed actions on storage node +// PieceAction is an enumeration of all possible executed actions on storage node. type PieceAction int32 const ( @@ -98,7 +98,7 @@ func (SettlementResponse_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e0f5d4cf0fc9e41b, []int{7, 0} } -// OrderLimit is provided by satellite to execute specific action on storage node within some limits +// OrderLimit is provided by satellite to execute specific action on storage node within some limits. type OrderLimit struct { // unique serial to avoid replay attacks SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` @@ -200,7 +200,7 @@ func (m *OrderLimit) GetSatelliteAddress() *NodeAddress { return nil } -// OrderLimitSigning provides OrderLimit signing serialization +// OrderLimitSigning provides OrderLimit signing serialization. // // It is never used for sending across the network, it is // used in signing to ensure that nullable=false fields get handled properly. @@ -306,7 +306,7 @@ func (m *OrderLimitSigning) GetSatelliteAddress() *NodeAddress { return nil } -// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber +// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber. type Order struct { // serial of the order limit that was signed SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` @@ -357,7 +357,7 @@ func (m *Order) GetUplinkSignature() []byte { return nil } -// OrderSigning provides Order signing format +// OrderSigning provides Order signing format. // // It is never used for sending across the network, it is // used in signing to ensure that nullable=false fields get handled properly. diff --git a/vendor/storj.io/common/pb/orders.proto b/vendor/storj.io/common/pb/orders.proto index c05cc4c00..66179684d 100644 --- a/vendor/storj.io/common/pb/orders.proto +++ b/vendor/storj.io/common/pb/orders.proto @@ -10,7 +10,7 @@ import "gogo.proto"; import "google/protobuf/timestamp.proto"; import "node.proto"; -// PieceAction is an enumeration of all possible executed actions on storage node +// PieceAction is an enumeration of all possible executed actions on storage node. enum PieceAction { INVALID = 0; PUT = 1; @@ -22,7 +22,7 @@ enum PieceAction { PUT_GRACEFUL_EXIT = 7; } -// OrderLimit is provided by satellite to execute specific action on storage node within some limits +// OrderLimit is provided by satellite to execute specific action on storage node within some limits. message OrderLimit { // unique serial to avoid replay attacks bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; @@ -51,7 +51,7 @@ message OrderLimit { node.NodeAddress satellite_address = 11; } -// OrderLimitSigning provides OrderLimit signing serialization +// OrderLimitSigning provides OrderLimit signing serialization. // // It is never used for sending across the network, it is // used in signing to ensure that nullable=false fields get handled properly. 
@@ -85,7 +85,7 @@ message OrderLimitSigning { node.NodeAddress satellite_address = 11; } -// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber +// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber. message Order { // serial of the order limit that was signed bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; @@ -95,7 +95,7 @@ message Order { bytes uplink_signature = 3; } -// OrderSigning provides Order signing format +// OrderSigning provides Order signing format. // // It is never used for sending across the network, it is // used in signing to ensure that nullable=false fields get handled properly. diff --git a/vendor/storj.io/common/pb/payments.pb.go b/vendor/storj.io/common/pb/payments.pb.go index a6a7e539b..1cc9127d2 100644 --- a/vendor/storj.io/common/pb/payments.pb.go +++ b/vendor/storj.io/common/pb/payments.pb.go @@ -95,9 +95,10 @@ func (m *PrepareInvoiceRecordsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PrepareInvoiceRecordsResponse proto.InternalMessageInfo type ApplyInvoiceRecordsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyInvoiceRecordsRequest) Reset() { *m = ApplyInvoiceRecordsRequest{} } @@ -124,6 +125,13 @@ func (m *ApplyInvoiceRecordsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceRecordsRequest proto.InternalMessageInfo +func (m *ApplyInvoiceRecordsRequest) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + type ApplyInvoiceRecordsResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -155,9 +163,10 @@ func (m *ApplyInvoiceRecordsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceRecordsResponse proto.InternalMessageInfo type ApplyInvoiceCouponsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyInvoiceCouponsRequest) Reset() { *m = ApplyInvoiceCouponsRequest{} } @@ -184,6 +193,13 @@ func (m *ApplyInvoiceCouponsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceCouponsRequest proto.InternalMessageInfo +func (m *ApplyInvoiceCouponsRequest) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + type ApplyInvoiceCouponsResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -215,9 +231,10 @@ func (m *ApplyInvoiceCouponsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceCouponsResponse proto.InternalMessageInfo type ApplyInvoiceCreditsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyInvoiceCreditsRequest) Reset() { *m = ApplyInvoiceCreditsRequest{} } @@ 
-244,6 +261,13 @@ func (m *ApplyInvoiceCreditsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceCreditsRequest proto.InternalMessageInfo +func (m *ApplyInvoiceCreditsRequest) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + type ApplyInvoiceCreditsResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -275,9 +299,10 @@ func (m *ApplyInvoiceCreditsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ApplyInvoiceCreditsResponse proto.InternalMessageInfo type CreateInvoicesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CreateInvoicesRequest) Reset() { *m = CreateInvoicesRequest{} } @@ -304,6 +329,13 @@ func (m *CreateInvoicesRequest) XXX_DiscardUnknown() { var xxx_messageInfo_CreateInvoicesRequest proto.InternalMessageInfo +func (m *CreateInvoicesRequest) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + type CreateInvoicesResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -350,29 +382,29 @@ func init() { func init() { proto.RegisterFile("payments.proto", fileDescriptor_a9566e6e864d2854) } var fileDescriptor_a9566e6e864d2854 = []byte{ - // 341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x4e, 0xfa, 0x40, - 0x10, 0xc7, 0x7f, 0xe4, 0x97, 0x10, 0x5c, 0x13, 0x0e, 0xab, 0x28, 0x59, 0x21, 0xc5, 0x26, 0x2a, - 0xa7, 0x6d, 0x82, 0x57, 0x2f, 0xc2, 0xc9, 0x1b, 0x21, 0x7a, 0x31, 0x5e, 0x0a, 0x1d, 0x9b, 0x12, - 0xda, 0x59, 0x77, 0x17, 0x13, 0xde, 0xc2, 0xc7, 0xf2, 0x29, 0xf4, 0x51, 0x34, 0xb0, 0x6d, 0x43, - 0x71, 0x17, 0x8e, 0x9d, 0xf9, 0xfe, 0x49, 0xe7, 0x93, 0x25, 0x4d, 0x11, 0xae, 0x52, 0xc8, 0xb4, - 0xe2, 0x42, 0xa2, 0x46, 0x7a, 0x94, 0x61, 0x04, 0x4a, 0x87, 0x5a, 0x31, 0x12, 0x63, 0x8c, 0x66, - 0xcc, 0xbc, 0x18, 0x31, 0x5e, 0x40, 0xb0, 0xf9, 0x9a, 0x2e, 0x5f, 0x03, 0x9d, 0xa4, 0x6b, 0x59, - 0x2a, 0x8c, 0xc0, 0x7f, 0x21, 0x9d, 0xb1, 0x04, 0x11, 0x4a, 0x78, 0xc8, 0xde, 0x31, 0x99, 0xc1, - 0x04, 0x66, 0x28, 0x23, 0x35, 0x81, 0xb7, 0x25, 0x28, 0x4d, 0xef, 0x48, 0x5d, 0x80, 0x4c, 0x30, - 0x6a, 0xd7, 0x7a, 0xb5, 0xfe, 0xf1, 0x80, 0x71, 0x93, 0xc8, 0x8b, 0x44, 0xfe, 0x58, 0x24, 0x0e, - 0x1b, 0x9f, 0x5f, 0xde, 0xbf, 0x8f, 0x6f, 0xaf, 0x36, 0xc9, 0x3d, 0xbe, 0x47, 0xba, 0x8e, 0x74, - 0x25, 0x30, 0x53, 0xe0, 0x77, 0x08, 0xbb, 0x17, 0x62, 0xb1, 0xb2, 0x96, 0xfb, 0x5d, 0x72, 0x61, - 0xdd, 0xda, 0xcd, 0x23, 0x5c, 0xae, 0xe7, 0x0e, 0x73, 0xb9, 0x75, 0x98, 0x25, 0x44, 0x89, 0x76, - 0x9a, 0x8b, 0x6d, 0x6e, 0x3e, 0x27, 0xad, 0x91, 0x84, 0x50, 0x17, 0xbf, 0x55, 0xfa, 0xda, 0xe4, - 0x6c, 0x77, 0x61, 0x2c, 0x83, 0x9f, 0xff, 0xa4, 0x31, 0xce, 0x99, 0xd1, 0x39, 0x69, 0x59, 0xef, - 0x42, 0x6f, 0x78, 0xc9, 0x91, 0xef, 0xe3, 0xc2, 0xfa, 0x87, 0x85, 0xa6, 0x98, 0x46, 0xe4, 0xc4, - 0x72, 0x44, 0x7a, 0xb5, 0x15, 0xe0, 0x46, 0xc0, 0xae, 0x0f, 0xc9, 0xec, 0x2d, 0xf9, 0xb5, 0x9d, - 0x2d, 0x55, 0x56, 0xce, 0x96, 0x1d, 0x68, 0x7f, 0x5a, 0x0c, 0x16, 0x77, 0x4b, 0x05, 0xaa, 0xbb, - 0xa5, 0x4a, 0x97, 0x3e, 0x91, 0x66, 0x15, 0x22, 0xed, 0x6d, 0x39, 0xad, 0xe0, 0xd9, 0xe5, 0x1e, - 0x85, 0x89, 0x1d, 0x9e, 0x3e, 0x53, 0xa5, 0x51, 0xce, 0x79, 0x82, 0xc1, 0x0c, 0xd3, 0x14, 0xb3, - 0x40, 0x4c, 0xa7, 0xf5, 
0xcd, 0x43, 0xba, 0xfd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x32, 0x6d, 0x84, - 0xad, 0xd1, 0x03, 0x00, 0x00, + // 342 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x25, 0x26, 0x04, 0x9f, 0x09, 0x8b, 0x51, 0x0c, 0xa9, 0x92, 0x22, 0x89, 0xca, 0x6a, + 0x9a, 0xe0, 0xd6, 0x8d, 0xb0, 0x72, 0x47, 0x88, 0x6c, 0x88, 0x9b, 0x42, 0x9f, 0x4d, 0x09, 0xed, + 0x1b, 0x67, 0x06, 0x13, 0x6e, 0xe1, 0xb1, 0x3c, 0x85, 0x1e, 0x45, 0x03, 0xd3, 0x12, 0x8a, 0x1d, + 0xd8, 0xc8, 0xb2, 0xcd, 0x9b, 0xef, 0x5b, 0xfc, 0x1f, 0x54, 0x85, 0xbf, 0x88, 0x31, 0xd1, 0x8a, + 0x0b, 0x49, 0x9a, 0xd8, 0x49, 0x42, 0x01, 0x2a, 0xed, 0x6b, 0xe5, 0x40, 0x48, 0x21, 0x99, 0xdf, + 0x8e, 0x1b, 0x12, 0x85, 0x33, 0xf4, 0x56, 0x5f, 0xe3, 0xf9, 0xab, 0xa7, 0xa3, 0x78, 0x79, 0x16, + 0x0b, 0x73, 0xd0, 0x7a, 0x81, 0xab, 0xbe, 0x44, 0xe1, 0x4b, 0x7c, 0x4a, 0xde, 0x29, 0x9a, 0xe0, + 0x00, 0x27, 0x24, 0x03, 0x35, 0xc0, 0xb7, 0x39, 0x2a, 0xcd, 0x1e, 0xa0, 0x2c, 0x50, 0x46, 0x14, + 0xd4, 0x4b, 0xcd, 0x52, 0xfb, 0xb4, 0xe3, 0x70, 0x43, 0xe4, 0x19, 0x91, 0x3f, 0x67, 0xc4, 0x6e, + 0xe5, 0xf3, 0xcb, 0x3d, 0xfa, 0xf8, 0x76, 0x4b, 0x83, 0xf4, 0x4d, 0xcb, 0x85, 0x86, 0x85, 0xae, + 0x04, 0x25, 0x0a, 0x5b, 0x23, 0x70, 0x1e, 0x85, 0x98, 0x2d, 0x0e, 0x21, 0x6f, 0xc0, 0x65, 0x21, + 0xbb, 0x58, 0xdd, 0xa3, 0xf9, 0xf2, 0xff, 0x41, 0xd4, 0x6b, 0xb6, 0x45, 0x2d, 0x31, 0x88, 0xf4, + 0x81, 0xd4, 0x19, 0x3b, 0x55, 0x0f, 0xa1, 0xd6, 0x93, 0xe8, 0xeb, 0x6c, 0x90, 0x7f, 0xb2, 0xd6, + 0xe1, 0x62, 0x1b, 0x6b, 0x84, 0x9d, 0x9f, 0x63, 0xa8, 0xf4, 0xd3, 0x56, 0xd9, 0x14, 0x6a, 0x85, + 0x3d, 0xb0, 0x3b, 0xbe, 0xee, 0x97, 0xef, 0xea, 0xd1, 0x69, 0xef, 0x3f, 0x34, 0x62, 0x16, 0xc0, + 0x59, 0xc1, 0xfc, 0xec, 0x66, 0x03, 0x60, 0x4f, 0xcf, 0xb9, 0xdd, 0x77, 0x56, 0x6c, 0x49, 0x97, + 0xb6, 0x5a, 0xf2, 0x95, 0x59, 0x2d, 0x5b, 0xc1, 0xfc, 0xb1, 0x98, 0x51, 0xed, 0x96, 0x5c, 0x50, + 0x76, 0x4b, 0xbe, 0x0d, 0x36, 0x84, 0x6a, 0x7e, 0x44, 0xd6, 0xdc, 0x78, 0x59, 0x98, 0x8d, 0x73, + 0xbd, 0xe3, 0xc2, 0x60, 0xbb, 0xe7, 0x23, 0xa6, 0x34, 0xc9, 0x29, 0x8f, 0xc8, 0x9b, 0x50, 0x1c, + 0x53, 0xe2, 0x89, 0xf1, 0xb8, 0xbc, 0xea, 0xea, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xde, + 0x0b, 0xc5, 0xc9, 0x04, 0x00, 0x00, } // --- DRPC BEGIN --- diff --git a/vendor/storj.io/common/pb/payments.proto b/vendor/storj.io/common/pb/payments.proto index c4942d568..e7d173cca 100644 --- a/vendor/storj.io/common/pb/payments.proto +++ b/vendor/storj.io/common/pb/payments.proto @@ -23,14 +23,26 @@ message PrepareInvoiceRecordsRequest { message PrepareInvoiceRecordsResponse {} -message ApplyInvoiceRecordsRequest {} +message ApplyInvoiceRecordsRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + message ApplyInvoiceRecordsResponse {} -message ApplyInvoiceCouponsRequest {} +message ApplyInvoiceCouponsRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + message ApplyInvoiceCouponsResponse {} -message ApplyInvoiceCreditsRequest {} +message ApplyInvoiceCreditsRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + message ApplyInvoiceCreditsResponse {} -message CreateInvoicesRequest {} +message CreateInvoicesRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + message CreateInvoicesResponse {} diff --git a/vendor/storj.io/common/pb/piecestore2.pb.go 
b/vendor/storj.io/common/pb/piecestore2.pb.go index 20acb0696..04cecdf71 100644 --- a/vendor/storj.io/common/pb/piecestore2.pb.go +++ b/vendor/storj.io/common/pb/piecestore2.pb.go @@ -58,7 +58,6 @@ func (PieceHeader_FormatVersion) EnumDescriptor() ([]byte, []int) { // Chunk -> // PieceHash signed by uplink -> // <- PieceHash signed by storage node -// type PieceUploadRequest struct { // first message to show that we are allowed to upload Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` diff --git a/vendor/storj.io/common/pb/piecestore2.proto b/vendor/storj.io/common/pb/piecestore2.proto index 7ea2352af..488cce3b7 100644 --- a/vendor/storj.io/common/pb/piecestore2.proto +++ b/vendor/storj.io/common/pb/piecestore2.proto @@ -29,7 +29,6 @@ service Piecestore { // Chunk -> // PieceHash signed by uplink -> // <- PieceHash signed by storage node -// message PieceUploadRequest { // first message to show that we are allowed to upload orders.OrderLimit limit = 1; diff --git a/vendor/storj.io/common/pb/pointerdb.pb.go b/vendor/storj.io/common/pb/pointerdb.pb.go index 230187e96..cb7775180 100644 --- a/vendor/storj.io/common/pb/pointerdb.pb.go +++ b/vendor/storj.io/common/pb/pointerdb.pb.go @@ -364,7 +364,6 @@ func (m *Pointer) GetPieceHashesVerified() bool { return false } -// ListResponse is a response message for the List rpc call type ListResponse struct { Items []*ListResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` diff --git a/vendor/storj.io/common/pb/pointerdb.proto b/vendor/storj.io/common/pb/pointerdb.proto index c0896f231..e37c1a325 100644 --- a/vendor/storj.io/common/pb/pointerdb.proto +++ b/vendor/storj.io/common/pb/pointerdb.proto @@ -61,7 +61,6 @@ message Pointer { bool piece_hashes_verified = 11; } -// ListResponse is a response message for the List rpc call message ListResponse { message Item { string path = 1; diff --git a/vendor/storj.io/common/pb/referralmanager.proto b/vendor/storj.io/common/pb/referralmanager.proto index 96230624b..4dc950055 100644 --- a/vendor/storj.io/common/pb/referralmanager.proto +++ b/vendor/storj.io/common/pb/referralmanager.proto @@ -5,7 +5,7 @@ import "gogo.proto"; package referralmanager; -// ReferralManager is a service for handling referrals +// ReferralManager is a service for handling referrals. service ReferralManager { // GetTokens retrieves a list of unredeemed tokens for a user rpc GetTokens(GetTokensRequest) returns (GetTokensResponse); diff --git a/vendor/storj.io/common/pb/types.go b/vendor/storj.io/common/pb/types.go index 970a0eddb..7a5591f72 100644 --- a/vendor/storj.io/common/pb/types.go +++ b/vendor/storj.io/common/pb/types.go @@ -5,32 +5,32 @@ package pb import "storj.io/common/storj" -// Path represents a object path +// Path represents a object path. type Path = storj.Path -// NodeID is an alias to storj.NodeID for use in generated protobuf code +// NodeID is an alias to storj.NodeID for use in generated protobuf code. type NodeID = storj.NodeID -// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code +// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code. type NodeIDList = storj.NodeIDList -// PieceID is an alias to storj.PieceID for use in generated protobuf code +// PieceID is an alias to storj.PieceID for use in generated protobuf code. 
type PieceID = storj.PieceID -// PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code +// PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code. type PiecePublicKey = storj.PiecePublicKey -// PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code +// PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code. type PiecePrivateKey = storj.PiecePrivateKey -// SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code +// SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code. type SerialNumber = storj.SerialNumber -// StreamID is an alias to storj.StreamID for use in generated protobuf code +// StreamID is an alias to storj.StreamID for use in generated protobuf code. type StreamID = storj.StreamID -// Nonce is an alias to storj.Nonce for use in generated protobuf code +// Nonce is an alias to storj.Nonce for use in generated protobuf code. type Nonce = storj.Nonce -// SegmentID is an alias to storj.SegmentID for use in generated protobuf code +// SegmentID is an alias to storj.SegmentID for use in generated protobuf code. type SegmentID = storj.SegmentID diff --git a/vendor/storj.io/common/pb/utils.go b/vendor/storj.io/common/pb/utils.go index 3ec02eabf..e37d1ac71 100644 --- a/vendor/storj.io/common/pb/utils.go +++ b/vendor/storj.io/common/pb/utils.go @@ -12,7 +12,7 @@ import ( "storj.io/common/storj" ) -// Equal compares two Protobuf messages via serialization +// Equal compares two Protobuf messages via serialization. func Equal(msg1, msg2 proto.Message) bool { //reflect.DeepEqual and proto.Equal don't seem work in all cases //todo: see how slow this is compared to custom equality checks @@ -33,7 +33,7 @@ func Equal(msg1, msg2 proto.Message) bool { return bytes.Equal(msg1Bytes, msg2Bytes) } -// NodesToIDs extracts Node-s into a list of ids +// NodesToIDs extracts Node-s into a list of ids. func NodesToIDs(nodes []*Node) storj.NodeIDList { ids := make(storj.NodeIDList, len(nodes)) for i, node := range nodes { @@ -47,7 +47,7 @@ func NodesToIDs(nodes []*Node) storj.NodeIDList { // CopyNode returns a deep copy of a node // It would be better to use `proto.Clone` but it is curently incompatible // with gogo's customtype extension. -// (see https://github.com/gogo/protobuf/issues/147) +// (see https://github.com/gogo/protobuf/issues/147). func CopyNode(src *Node) (dst *Node) { node := Node{Id: storj.NodeID{}} copy(node.Id[:], src.Id[:]) @@ -62,7 +62,7 @@ func CopyNode(src *Node) (dst *Node) { return &node } -// AddressEqual compares two node addresses +// AddressEqual compares two node addresses. func AddressEqual(a1, a2 *NodeAddress) bool { if a1 == nil && a2 == nil { return true diff --git a/vendor/storj.io/common/pb/vouchers.pb.go b/vendor/storj.io/common/pb/vouchers.pb.go index a7dbbd9e1..b771457e3 100644 --- a/vendor/storj.io/common/pb/vouchers.pb.go +++ b/vendor/storj.io/common/pb/vouchers.pb.go @@ -54,7 +54,7 @@ func (VoucherResponse_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor_3659b9a115b8060d, []int{2, 0} } -// Voucher is a signed message verifying that a node has been vetted by a particular satellite +// Voucher is a signed message verifying that a node has been vetted by a particular satellite. 
type Voucher struct { SatelliteId NodeID `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` StorageNodeId NodeID `protobuf:"bytes,2,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"` diff --git a/vendor/storj.io/common/pb/vouchers.proto b/vendor/storj.io/common/pb/vouchers.proto index 817ee8abd..e4ba38a52 100644 --- a/vendor/storj.io/common/pb/vouchers.proto +++ b/vendor/storj.io/common/pb/vouchers.proto @@ -9,7 +9,7 @@ package vouchers; import "gogo.proto"; import "google/protobuf/timestamp.proto"; -// Voucher is a signed message verifying that a node has been vetted by a particular satellite +// Voucher is a signed message verifying that a node has been vetted by a particular satellite. message Voucher { bytes satellite_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes storage_node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; diff --git a/vendor/storj.io/common/peertls/extensions/extensions.go b/vendor/storj.io/common/peertls/extensions/extensions.go index c561cc4ad..2bfda7d46 100644 --- a/vendor/storj.io/common/peertls/extensions/extensions.go +++ b/vendor/storj.io/common/peertls/extensions/extensions.go @@ -64,7 +64,7 @@ var ( type ExtensionID = asn1.ObjectIdentifier // Config is used to bind cli flags for determining which extensions will -// be used by the server +// be used by the server. type Config struct { Revocation bool `default:"true" help:"if true, client leaves may contain the most recent certificate revocation for the current certificate"` WhitelistSignedLeaf bool `default:"false" help:"if true, client leaves must contain a valid \"signed certificate extension\" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)"` diff --git a/vendor/storj.io/common/peertls/extensions/revocations.go b/vendor/storj.io/common/peertls/extensions/revocations.go index 92019cda2..4b6035442 100644 --- a/vendor/storj.io/common/peertls/extensions/revocations.go +++ b/vendor/storj.io/common/peertls/extensions/revocations.go @@ -27,16 +27,16 @@ var ( RevocationUpdateHandler = NewHandlerFactory(&RevocationExtID, revocationUpdater) ) -// ErrRevocation is used when an error occurs involving a certificate revocation +// ErrRevocation is used when an error occurs involving a certificate revocation. var ErrRevocation = errs.Class("revocation processing error") -// ErrRevocationDB is used when an error occurs involving the revocations database +// ErrRevocationDB is used when an error occurs involving the revocations database. var ErrRevocationDB = errs.Class("revocation database error") -// ErrRevokedCert is used when a certificate in the chain is revoked and not expected to be +// ErrRevokedCert is used when a certificate in the chain is revoked and not expected to be. var ErrRevokedCert = ErrRevocation.New("a certificate in the chain is revoked") -// ErrRevocationTimestamp is used when a revocation's timestamp is older than the last recorded revocation +// ErrRevocationTimestamp is used when a revocation's timestamp is older than the last recorded revocation. 
var ErrRevocationTimestamp = Error.New("revocation timestamp is older than last known revocation") // Revocation represents a certificate revocation for storage in the revocation @@ -161,12 +161,12 @@ func (r *Revocation) Sign(key crypto.PrivateKey) error { return nil } -// Marshal serializes a revocation to bytes +// Marshal serializes a revocation to bytes. func (r Revocation) Marshal() ([]byte, error) { return (&revocationEncoder{}).encode(r) } -// Unmarshal deserializes a revocation from bytes +// Unmarshal deserializes a revocation from bytes. func (r *Revocation) Unmarshal(data []byte) error { revocation, err := (&revocationDecoder{}).decode(data) if err != nil { diff --git a/vendor/storj.io/common/peertls/templates.go b/vendor/storj.io/common/peertls/templates.go index 26992c27f..ca1d75c85 100644 --- a/vendor/storj.io/common/peertls/templates.go +++ b/vendor/storj.io/common/peertls/templates.go @@ -8,7 +8,7 @@ import ( "crypto/x509/pkix" ) -// CATemplate returns x509.Certificate template for certificate authority +// CATemplate returns x509.Certificate template for certificate authority. func CATemplate() (*x509.Certificate, error) { serialNumber, err := newSerialNumber() if err != nil { @@ -26,7 +26,7 @@ func CATemplate() (*x509.Certificate, error) { return template, nil } -// LeafTemplate returns x509.Certificate template for signing and encrypting +// LeafTemplate returns x509.Certificate template for signing and encrypting. func LeafTemplate() (*x509.Certificate, error) { serialNumber, err := newSerialNumber() if err != nil { diff --git a/vendor/storj.io/common/peertls/tlsopts/config.go b/vendor/storj.io/common/peertls/tlsopts/config.go index 115d358df..14d2e785f 100644 --- a/vendor/storj.io/common/peertls/tlsopts/config.go +++ b/vendor/storj.io/common/peertls/tlsopts/config.go @@ -7,7 +7,7 @@ import ( "storj.io/common/peertls/extensions" ) -// Config holds tls configuration parameters +// Config holds tls configuration parameters. type Config struct { RevocationDBURL string `default:"bolt://$CONFDIR/revocations.db" help:"url for revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)"` PeerCAWhitelistPath string `help:"path to the CA cert whitelist (peer identities must be signed by one these to be verified). this will override the default peer whitelist"` diff --git a/vendor/storj.io/common/peertls/tlsopts/tls.go b/vendor/storj.io/common/peertls/tlsopts/tls.go index 7e6b0a87f..61909e831 100644 --- a/vendor/storj.io/common/peertls/tlsopts/tls.go +++ b/vendor/storj.io/common/peertls/tlsopts/tls.go @@ -24,7 +24,7 @@ func (opts *Options) ClientTLSConfig(id storj.NodeID) *tls.Config { } // ClientTLSConfigPrefix returns a TSLConfig for use as a client in handshaking with a peer. -// The peer node id is validated to match the given prefix +// The peer node id is validated to match the given prefix. func (opts *Options) ClientTLSConfigPrefix(idPrefix string) *tls.Config { return opts.tlsConfig(false, verifyIdentityPrefix(idPrefix)) } diff --git a/vendor/storj.io/common/peertls/utils.go b/vendor/storj.io/common/peertls/utils.go index eb0d5df88..c0ae6ec30 100644 --- a/vendor/storj.io/common/peertls/utils.go +++ b/vendor/storj.io/common/peertls/utils.go @@ -49,12 +49,12 @@ func DoubleSHA256PublicKey(k crypto.PublicKey) ([sha256.Size]byte, error) { return end, nil } -// Temporary returns false to indicate that is is a non-temporary error +// Temporary returns false to indicate that is is a non-temporary error. 
func (nte NonTemporaryError) Temporary() bool { return false } -// Err returns the underlying error +// Err returns the underlying error. func (nte NonTemporaryError) Err() error { return nte.error } diff --git a/vendor/storj.io/common/pkcrypto/hashing.go b/vendor/storj.io/common/pkcrypto/hashing.go index 815a73e65..71ccd6299 100644 --- a/vendor/storj.io/common/pkcrypto/hashing.go +++ b/vendor/storj.io/common/pkcrypto/hashing.go @@ -14,7 +14,7 @@ func NewHash() hash.Hash { return sha256.New() } -// SHA256Hash calculates the SHA256 hash of the input data +// SHA256Hash calculates the SHA256 hash of the input data. func SHA256Hash(data []byte) []byte { sum := sha256.Sum256(data) return sum[:] diff --git a/vendor/storj.io/common/pkcrypto/signing.go b/vendor/storj.io/common/pkcrypto/signing.go index a4c4466cf..c7c333c2c 100644 --- a/vendor/storj.io/common/pkcrypto/signing.go +++ b/vendor/storj.io/common/pkcrypto/signing.go @@ -33,18 +33,18 @@ var ( } ) -// GeneratePrivateKey returns a new PrivateKey for signing messages +// GeneratePrivateKey returns a new PrivateKey for signing messages. func GeneratePrivateKey() (crypto.PrivateKey, error) { return GeneratePrivateECDSAKey(authECCurve) // return GeneratePrivateRSAKey(StorjRSAKeyBits) } -// GeneratePrivateECDSAKey returns a new private ECDSA key for signing messages +// GeneratePrivateECDSAKey returns a new private ECDSA key for signing messages. func GeneratePrivateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { return ecdsa.GenerateKey(curve, rand.Reader) } -// GeneratePrivateRSAKey returns a new private RSA key for signing messages +// GeneratePrivateRSAKey returns a new private RSA key for signing messages. func GeneratePrivateRSAKey(bits int) (*rsa.PrivateKey, error) { return rsa.GenerateKey(rand.Reader, bits) } diff --git a/vendor/storj.io/common/ranger/common.go b/vendor/storj.io/common/ranger/common.go index 98c25f790..a3eae7c2c 100644 --- a/vendor/storj.io/common/ranger/common.go +++ b/vendor/storj.io/common/ranger/common.go @@ -8,7 +8,7 @@ import ( "github.com/zeebo/errs" ) -// Error is the errs class of standard Ranger errors +// Error is the errs class of standard Ranger errors. var Error = errs.Class("ranger error") var mon = monkit.Package() diff --git a/vendor/storj.io/common/ranger/reader.go b/vendor/storj.io/common/ranger/reader.go index 14a3d3b04..a002f45c7 100644 --- a/vendor/storj.io/common/ranger/reader.go +++ b/vendor/storj.io/common/ranger/reader.go @@ -20,13 +20,13 @@ type Ranger interface { Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) } -// ByteRanger turns a byte slice into a Ranger +// ByteRanger turns a byte slice into a Ranger. type ByteRanger []byte -// Size implements Ranger.Size +// Size implements Ranger.Size. func (b ByteRanger) Size() int64 { return int64(len(b)) } -// Range implements Ranger.Range +// Range implements Ranger.Range. func (b ByteRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { defer mon.Task()(&ctx)(&err) if offset < 0 { @@ -75,7 +75,7 @@ func concat2(r1, r2 Ranger) Ranger { return &concatReader{r1: r1, r2: r2} } -// Concat concatenates Rangers +// Concat concatenates Rangers. 
func Concat(r ...Ranger) Ranger { switch len(r) { case 0: diff --git a/vendor/storj.io/common/ranger/readerat.go b/vendor/storj.io/common/ranger/readerat.go index cd44367ec..be8b8617d 100644 --- a/vendor/storj.io/common/ranger/readerat.go +++ b/vendor/storj.io/common/ranger/readerat.go @@ -13,7 +13,7 @@ type readerAtRanger struct { size int64 } -// ReaderAtRanger converts a ReaderAt with a given size to a Ranger +// ReaderAtRanger converts a ReaderAt with a given size to a Ranger. func ReaderAtRanger(r io.ReaderAt, size int64) Ranger { return &readerAtRanger{ r: r, diff --git a/vendor/storj.io/common/rpc/common.go b/vendor/storj.io/common/rpc/common.go index cd2935174..f1079e3b0 100644 --- a/vendor/storj.io/common/rpc/common.go +++ b/vendor/storj.io/common/rpc/common.go @@ -18,9 +18,6 @@ import ( const ( // IsDRPC is true if drpc is being used. IsDRPC = true - - // IsGRPC is true if grpc is being used. - IsGRPC = false ) var mon = monkit.Package() diff --git a/vendor/storj.io/common/rpc/dial.go b/vendor/storj.io/common/rpc/dial.go index 7ca3221c7..27f53b731 100644 --- a/vendor/storj.io/common/rpc/dial.go +++ b/vendor/storj.io/common/rpc/dial.go @@ -37,6 +37,13 @@ func NewDefaultManagerOptions() drpcmanager.Options { } } +// Transport is a type that creates net.Conns, given an address. +// net.Dialer implements this interface and is used by default. +type Transport interface { + // DialContext is called to establish a connection. + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + // Dialer holds configuration for dialing. type Dialer struct { // TLSOptions controls the tls options for dialing. If it is nil, only @@ -65,6 +72,9 @@ type Dialer struct { // socket option on dialed connections. Only valid on linux. Only set // if positive. TCPUserTimeout time.Duration + + // Transport is how sockets are opened. If nil, net.Dialer is used. + Transport Transport } // NewDefaultDialer returns a Dialer with default timeouts set. @@ -96,7 +106,12 @@ func (d Dialer) dialContext(ctx context.Context, address string) (net.Conn, erro } } - conn, err := new(net.Dialer).DialContext(ctx, "tcp", address) + dialer := d.Transport + if dialer == nil { + dialer = new(net.Dialer) + } + + conn, err := dialer.DialContext(ctx, "tcp", address) if err != nil { // N.B. this error is not wrapped on purpose! grpc code cares about inspecting // it and it's not smart enough to attempt to do any unwrapping. :( Additionally @@ -142,7 +157,7 @@ func (d Dialer) DialNode(ctx context.Context, node *pb.Node) (_ *Conn, err error // DialAddressID dials to the specified address and asserts it has the given node id. func (d Dialer) DialAddressID(ctx context.Context, address string, id storj.NodeID) (_ *Conn, err error) { - defer mon.Task()(&ctx)(&err) + defer mon.Task()(&ctx, "node: "+id.String()[0:8])(&err) if d.TLSOptions == nil { return nil, Error.New("tls options not set when required for this dial") @@ -151,6 +166,17 @@ func (d Dialer) DialAddressID(ctx context.Context, address string, id storj.Node return d.dial(ctx, address, d.TLSOptions.ClientTLSConfig(id)) } +// DialNodeURL dials to the specified node url and asserts it has the given node id. 
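The rpc/dial.go hunk above adds a Transport interface and a Dialer.Transport field, falling back to a plain net.Dialer when the field is nil. A sketch of plugging in a custom transport; the wrapping type is purely illustrative, and TLSOptions plus the timeout fields are omitted only to keep it short:

package main

import (
    "context"
    "log"
    "net"
    "time"

    "storj.io/common/rpc"
)

// loggingTransport is a hypothetical Transport that delegates to net.Dialer
// and logs every outbound connection attempt.
type loggingTransport struct {
    inner net.Dialer
}

func (t *loggingTransport) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    log.Printf("dialing %s %s", network, address)
    return t.inner.DialContext(ctx, network, address)
}

func main() {
    dialer := rpc.Dialer{
        // Transport is the new hook; leaving it nil keeps the previous
        // behaviour of dialing with a fresh net.Dialer.
        Transport: &loggingTransport{inner: net.Dialer{Timeout: 20 * time.Second}},
    }
    _ = dialer // TLSOptions and timeouts would be set before real use
}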
+func (d Dialer) DialNodeURL(ctx context.Context, nodeURL storj.NodeURL) (_ *Conn, err error) { + defer mon.Task()(&ctx, "node: "+nodeURL.ID.String()[0:8])(&err) + + if d.TLSOptions == nil { + return nil, Error.New("tls options not set when required for this dial") + } + + return d.dial(ctx, nodeURL.Address, d.TLSOptions.ClientTLSConfig(nodeURL.ID)) +} + // DialAddressInsecureBestEffort is like DialAddressInsecure but tries to dial a node securely if // it can. // @@ -330,7 +356,7 @@ type tlsConnWrapper struct { underlying net.Conn } -// Close closes the underlying connection +// Close closes the underlying connection. func (t *tlsConnWrapper) Close() error { return t.underlying.Close() } // drpcHeaderConn fulfills the net.Conn interface. On the first call to Write @@ -340,7 +366,7 @@ type drpcHeaderConn struct { once sync.Once } -// newDrpcHeaderConn returns a new *drpcHeaderConn +// newDrpcHeaderConn returns a new *drpcHeaderConn. func newDrpcHeaderConn(conn net.Conn) *drpcHeaderConn { return &drpcHeaderConn{ Conn: conn, diff --git a/vendor/storj.io/common/rpc/known_ids.go b/vendor/storj.io/common/rpc/known_ids.go index 9f7a1e78d..55c188c07 100644 --- a/vendor/storj.io/common/rpc/known_ids.go +++ b/vendor/storj.io/common/rpc/known_ids.go @@ -43,7 +43,7 @@ func init() { } } -// KnownNodeID looks for a well-known node id for a given address +// KnownNodeID looks for a well-known node id for a given address. func KnownNodeID(address string) (id storj.NodeID, known bool) { id, known = knownNodeIDs[address] if !known { diff --git a/vendor/storj.io/common/rpc/rpcpeer/peer.go b/vendor/storj.io/common/rpc/rpcpeer/peer.go index 6d6c0c1bc..477cc83f5 100644 --- a/vendor/storj.io/common/rpc/rpcpeer/peer.go +++ b/vendor/storj.io/common/rpc/rpcpeer/peer.go @@ -11,7 +11,6 @@ import ( "github.com/zeebo/errs" - "storj.io/common/internal/grpchook" "storj.io/drpc/drpcctx" ) @@ -38,13 +37,8 @@ func FromContext(ctx context.Context) (*Peer, error) { return peer, nil } else if peer, drpcErr := drpcInternalFromContext(ctx); drpcErr == nil { return peer, nil - } else if addr, state, grpcErr := grpchook.InternalFromContext(ctx); grpcErr == nil { - return &Peer{Addr: addr, State: state}, nil } else { - if grpcErr == grpchook.ErrNotHooked { - grpcErr = nil - } - return nil, errs.Combine(drpcErr, grpcErr) + return nil, drpcErr } } diff --git a/vendor/storj.io/common/rpc/rpcpool/pool.go b/vendor/storj.io/common/rpc/rpcpool/pool.go index c6d57effc..9562086bf 100644 --- a/vendor/storj.io/common/rpc/rpcpool/pool.go +++ b/vendor/storj.io/common/rpc/rpcpool/pool.go @@ -23,7 +23,7 @@ var mon = monkit.Package() // we may want to keep. that adds quite a bit of complexity because channels // do not support removing buffered elements, so it didn't seem worth it. -// expiringConn wraps a connection +// expiringConn wraps a connection. type expiringConn struct { conn *drpcconn.Conn timer *time.Timer @@ -39,6 +39,11 @@ func newExpiringConn(conn *drpcconn.Conn, dur time.Duration) *expiringConn { return ex } +// Closed returns true if the connection is already closed. +func (ex *expiringConn) Closed() bool { + return ex.conn.Closed() +} + // Cancel attempts to cancel the expiration timer and returns true if the // timer will not close the connection. func (ex *expiringConn) Cancel() bool { @@ -107,6 +112,16 @@ func (c *Conn) Close() (err error) { return err } +// Closed returns true if the connection is already closed. 
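DialNodeURL, added above, folds the address and expected node ID into a single storj.NodeURL argument. A sketch of calling it; the URL string is a placeholder of the usual "<node id>@<host>:<port>" form, and the dialer is assumed to be configured with TLS options already:

package example

import (
    "context"

    "storj.io/common/rpc"
    "storj.io/common/storj"
)

// dialNode parses a node URL and dials it, letting DialNodeURL assert that
// the peer presents the node ID embedded in the URL.
func dialNode(ctx context.Context, dialer rpc.Dialer, nodeURLString string) error {
    nodeURL, err := storj.ParseNodeURL(nodeURLString)
    if err != nil {
        return err
    }

    conn, err := dialer.DialNodeURL(ctx, nodeURL)
    if err != nil {
        return err
    }
    defer func() { _ = conn.Close() }()

    // ... issue DRPC calls over conn ...
    return nil
}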
+func (c *Conn) Closed() bool { + select { + case <-c.done: + return true + default: + return false + } +} + // newConn creates a new connection using the dialer. func (c *Conn) newConn(ctx context.Context) (_ *drpcconn.Conn, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/common/rpc/rpcstatus/status.go b/vendor/storj.io/common/rpc/rpcstatus/status.go index a287d83e2..a00b4cc22 100644 --- a/vendor/storj.io/common/rpc/rpcstatus/status.go +++ b/vendor/storj.io/common/rpc/rpcstatus/status.go @@ -10,7 +10,6 @@ import ( "github.com/zeebo/errs" - "storj.io/common/internal/grpchook" "storj.io/drpc/drpcerr" ) @@ -55,13 +54,6 @@ func Code(err error) StatusCode { return code } - // If we have grpc attached try to get grpc code if possible. - if grpchook.HookedConvertToStatusCode != nil { - if code, ok := grpchook.HookedConvertToStatusCode(err); ok { - return StatusCode(code) - } - } - return Unknown } } @@ -72,11 +64,6 @@ func Wrap(code StatusCode, err error) error { return nil } - // Should we also handle grpc error status codes. - if grpchook.HookedErrorWrap != nil { - return grpchook.HookedErrorWrap(grpchook.StatusCode(code), err) - } - ce := &codeErr{ code: code, } @@ -95,7 +82,7 @@ func Error(code StatusCode, msg string) error { return Wrap(code, errs.New("%s", msg)) } -// Errorf : Error :: fmt.Sprintf : fmt.Sprint +// Errorf : Error :: fmt.Sprintf : fmt.Sprint. func Errorf(code StatusCode, format string, a ...interface{}) error { return Wrap(code, errs.New(format, a...)) } diff --git a/vendor/storj.io/common/signing/sign.go b/vendor/storj.io/common/signing/sign.go index 40a64f353..bf664bd8e 100644 --- a/vendor/storj.io/common/signing/sign.go +++ b/vendor/storj.io/common/signing/sign.go @@ -92,8 +92,8 @@ func SignUplinkPieceHash(ctx context.Context, privateKey storj.PiecePrivateKey, return &signed, nil } -// SignStreamID signs the stream ID using the specified signer -// Signer is a satellite +// SignStreamID signs the stream ID using the specified signer. +// Signer is a satellite. func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID) (_ *pb.SatStreamID, err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeStreamID(ctx, unsigned) @@ -110,8 +110,8 @@ func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID) return &signed, nil } -// SignSegmentID signs the segment ID using the specified signer -// Signer is a satellite +// SignSegmentID signs the segment ID using the specified signer. +// Signer is a satellite. func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID) (_ *pb.SatSegmentID, err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeSegmentID(ctx, unsigned) @@ -128,8 +128,8 @@ func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID return &signed, nil } -// SignExitCompleted signs the ExitCompleted using the specified signer -// Signer is a satellite +// SignExitCompleted signs the ExitCompleted using the specified signer. +// Signer is a satellite. func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitCompleted) (_ *pb.ExitCompleted, err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeExitCompleted(ctx, unsigned) @@ -146,8 +146,8 @@ func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitComp return &signed, nil } -// SignExitFailed signs the ExitFailed using the specified signer -// Signer is a satellite +// SignExitFailed signs the ExitFailed using the specified signer. +// Signer is a satellite. 
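With the grpchook fallbacks removed above, rpcstatus is the single place where status codes are attached to and read back from errors. A sketch of both directions; NotFound is assumed from the package's full code list, which this hunk's context does not show:

package example

import (
    "storj.io/common/rpc/rpcstatus"
)

// notFound wraps a formatted error with a status code; Errorf is to Error as
// fmt.Sprintf is to fmt.Sprint, per the comment in the hunk above.
func notFound(name string) error {
    return rpcstatus.Errorf(rpcstatus.NotFound, "object %q not found", name)
}

// isNotFound classifies an incoming error by its code instead of its text.
func isNotFound(err error) bool {
    return rpcstatus.Code(err) == rpcstatus.NotFound
}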
func SignExitFailed(ctx context.Context, signer Signer, unsigned *pb.ExitFailed) (_ *pb.ExitFailed, err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeExitFailed(ctx, unsigned) diff --git a/vendor/storj.io/common/signing/verify.go b/vendor/storj.io/common/signing/verify.go index 086d0860a..a1ef224be 100644 --- a/vendor/storj.io/common/signing/verify.go +++ b/vendor/storj.io/common/signing/verify.go @@ -72,7 +72,7 @@ func VerifyUplinkPieceHashSignature(ctx context.Context, publicKey storj.PiecePu return Error.Wrap(publicKey.Verify(bytes, signed.Signature)) } -// VerifyStreamID verifies that the signature inside stream ID belongs to the satellite +// VerifyStreamID verifies that the signature inside stream ID belongs to the satellite. func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamID) (err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeStreamID(ctx, signed) @@ -83,7 +83,7 @@ func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamI return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) } -// VerifySegmentID verifies that the signature inside segment ID belongs to the satellite +// VerifySegmentID verifies that the signature inside segment ID belongs to the satellite. func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmentID) (err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeSegmentID(ctx, signed) @@ -94,7 +94,7 @@ func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmen return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) } -// VerifyExitCompleted verifies that the signature inside ExitCompleted belongs to the satellite +// VerifyExitCompleted verifies that the signature inside ExitCompleted belongs to the satellite. func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitCompleted) (err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeExitCompleted(ctx, signed) @@ -105,7 +105,7 @@ func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitC return Error.Wrap(satellite.HashAndVerifySignature(ctx, bytes, signed.ExitCompleteSignature)) } -// VerifyExitFailed verifies that the signature inside ExitFailed belongs to the satellite +// VerifyExitFailed verifies that the signature inside ExitFailed belongs to the satellite. func VerifyExitFailed(ctx context.Context, satellite Signee, signed *pb.ExitFailed) (err error) { defer mon.Task()(&ctx)(&err) bytes, err := EncodeExitFailed(ctx, signed) diff --git a/vendor/storj.io/common/storj/bucket.go b/vendor/storj.io/common/storj/bucket.go index 81208eec9..fc4ddfca6 100644 --- a/vendor/storj.io/common/storj/bucket.go +++ b/vendor/storj.io/common/storj/bucket.go @@ -12,17 +12,17 @@ import ( ) var ( - // ErrBucket is an error class for general bucket errors + // ErrBucket is an error class for general bucket errors. ErrBucket = errs.Class("bucket") - // ErrNoBucket is an error class for using empty bucket name + // ErrNoBucket is an error class for using empty bucket name. ErrNoBucket = errs.Class("no bucket specified") - // ErrBucketNotFound is an error class for non-existing bucket + // ErrBucketNotFound is an error class for non-existing bucket. ErrBucketNotFound = errs.Class("bucket not found") ) -// Bucket contains information about a specific bucket +// Bucket contains information about a specific bucket. 
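The error classes declared above (ErrBucket, ErrNoBucket, ErrBucketNotFound) are zeebo/errs classes, so callers are expected to classify failures with the class's Has helper rather than by matching error strings. A one-function sketch, assuming the standard errs.Class API:

package example

import (
    "storj.io/common/storj"
)

// isMissingBucket reports whether err was produced by the ErrBucketNotFound
// class, however deeply it has been wrapped by errs.
func isMissingBucket(err error) bool {
    return storj.ErrBucketNotFound.Has(err)
}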
type Bucket struct { ID uuid.UUID Name string diff --git a/vendor/storj.io/common/storj/doc.go b/vendor/storj.io/common/storj/doc.go index 912cb7940..17c20655b 100644 --- a/vendor/storj.io/common/storj/doc.go +++ b/vendor/storj.io/common/storj/doc.go @@ -1,7 +1,5 @@ // Copyright (C) 2019 Storj Labs, Inc. // See LICENSE for copying information. -/*Package storj contains the types which represent the main entities of the -Storj domain. -*/ +// Package storj contains the types which represent the main entities of the Storj domain. package storj diff --git a/vendor/storj.io/common/storj/encryption.go b/vendor/storj.io/common/storj/encryption.go index 0e295f90c..1f521d640 100644 --- a/vendor/storj.io/common/storj/encryption.go +++ b/vendor/storj.io/common/storj/encryption.go @@ -9,7 +9,7 @@ import ( "github.com/zeebo/errs" ) -// EncryptionParameters is the cipher suite and parameters used for encryption +// EncryptionParameters is the cipher suite and parameters used for encryption. type EncryptionParameters struct { // CipherSuite specifies the cipher suite to be used for encryption. CipherSuite CipherSuite @@ -24,7 +24,7 @@ type EncryptionParameters struct { BlockSize int32 } -// IsZero returns true if no field in the struct is set to non-zero value +// IsZero returns true if no field in the struct is set to non-zero value. func (params EncryptionParameters) IsZero() bool { return params == (EncryptionParameters{}) } @@ -45,11 +45,11 @@ const ( // by the NaCl cryptography library under the name "Secretbox". EncSecretBox // EncNullBase64URL is like EncNull but Base64 encodes/decodes the - // binary path data (URL-safe) + // binary path data (URL-safe). EncNullBase64URL ) -// Constant definitions for key and nonce sizes +// Constant definitions for key and nonce sizes. const ( KeySize = 32 NonceSize = 24 @@ -66,29 +66,29 @@ func NewKey(humanReadableKey []byte) (*Key, error) { return &key, nil } -// Key represents the largest key used by any encryption protocol +// Key represents the largest key used by any encryption protocol. type Key [KeySize]byte -// Raw returns the key as a raw byte array pointer +// Raw returns the key as a raw byte array pointer. func (key *Key) Raw() *[KeySize]byte { return (*[KeySize]byte)(key) } -// IsZero returns true if key is nil or it points to its zero value +// IsZero returns true if key is nil or it points to its zero value. func (key *Key) IsZero() bool { return key == nil || *key == (Key{}) } -// ErrNonce is used when something goes wrong with a stream ID +// ErrNonce is used when something goes wrong with a stream ID. var ErrNonce = errs.Class("nonce error") -// nonceEncoding is base32 without padding +// nonceEncoding is base32 without padding. var nonceEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) -// Nonce represents the largest nonce used by any encryption protocol +// Nonce represents the largest nonce used by any encryption protocol. type Nonce [NonceSize]byte -// NonceFromString decodes an base32 encoded +// NonceFromString decodes an base32 encoded. func NonceFromString(s string) (Nonce, error) { nonceBytes, err := nonceEncoding.DecodeString(s) if err != nil { @@ -97,7 +97,7 @@ func NonceFromString(s string) (Nonce, error) { return NonceFromBytes(nonceBytes) } -// NonceFromBytes converts a byte slice into a nonce +// NonceFromBytes converts a byte slice into a nonce. 
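EncryptionParameters, touched above, is the value callers fill in to pick a cipher suite and block size. A sketch of constructing and checking one; the block size is an arbitrary illustrative number, not a project default:

package example

import (
    "fmt"

    "storj.io/common/storj"
)

// exampleParameters returns an illustrative parameter set using the
// NaCl Secretbox suite documented above.
func exampleParameters() storj.EncryptionParameters {
    return storj.EncryptionParameters{
        CipherSuite: storj.EncSecretBox,
        BlockSize:   3584, // illustrative only
    }
}

// describe uses IsZero to distinguish "not configured" from a real setting.
func describe(params storj.EncryptionParameters) string {
    if params.IsZero() {
        return "encryption parameters not set"
    }
    return fmt.Sprintf("cipher suite %d, block size %d", params.CipherSuite, params.BlockSize)
}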
func NonceFromBytes(b []byte) (Nonce, error) { if len(b) != len(Nonce{}) { return Nonce{}, ErrNonce.New("not enough bytes to make a nonce; have %d, need %d", len(b), len(NodeID{})) @@ -108,56 +108,56 @@ func NonceFromBytes(b []byte) (Nonce, error) { return nonce, nil } -// IsZero returns whether nonce is unassigned +// IsZero returns whether nonce is unassigned. func (nonce Nonce) IsZero() bool { return nonce == Nonce{} } -// String representation of the nonce +// String representation of the nonce. func (nonce Nonce) String() string { return nonceEncoding.EncodeToString(nonce.Bytes()) } -// Bytes returns bytes of the nonce +// Bytes returns bytes of the nonce. func (nonce Nonce) Bytes() []byte { return nonce[:] } -// Raw returns the nonce as a raw byte array pointer +// Raw returns the nonce as a raw byte array pointer. func (nonce *Nonce) Raw() *[NonceSize]byte { return (*[NonceSize]byte)(nonce) } -// Marshal serializes a nonce +// Marshal serializes a nonce. func (nonce Nonce) Marshal() ([]byte, error) { return nonce.Bytes(), nil } -// MarshalTo serializes a nonce into the passed byte slice +// MarshalTo serializes a nonce into the passed byte slice. func (nonce *Nonce) MarshalTo(data []byte) (n int, err error) { n = copy(data, nonce.Bytes()) return n, nil } -// Unmarshal deserializes a nonce +// Unmarshal deserializes a nonce. func (nonce *Nonce) Unmarshal(data []byte) error { var err error *nonce, err = NonceFromBytes(data) return err } -// Size returns the length of a nonce (implements gogo's custom type interface) +// Size returns the length of a nonce (implements gogo's custom type interface). func (nonce Nonce) Size() int { return len(nonce) } -// MarshalJSON serializes a nonce to a json string as bytes +// MarshalJSON serializes a nonce to a json string as bytes. func (nonce Nonce) MarshalJSON() ([]byte, error) { return []byte(`"` + nonce.String() + `"`), nil } -// UnmarshalJSON deserializes a json string (as bytes) to a nonce +// UnmarshalJSON deserializes a json string (as bytes) to a nonce. func (nonce *Nonce) UnmarshalJSON(data []byte) error { var err error *nonce, err = NonceFromString(string(data)) return err } -// EncryptedPrivateKey is a private key that has been encrypted +// EncryptedPrivateKey is a private key that has been encrypted. type EncryptedPrivateKey []byte diff --git a/vendor/storj.io/common/storj/metainfo.go b/vendor/storj.io/common/storj/metainfo.go index 75d6fdec8..00190dd3b 100644 --- a/vendor/storj.io/common/storj/metainfo.go +++ b/vendor/storj.io/common/storj/metainfo.go @@ -3,21 +3,21 @@ package storj -// ListDirection specifies listing direction +// ListDirection specifies listing direction. type ListDirection int8 const ( - // Before lists backwards from cursor, without cursor [NOT SUPPORTED] + // Before lists backwards from cursor, without cursor [NOT SUPPORTED]. Before = ListDirection(-2) - // Backward lists backwards from cursor, including cursor [NOT SUPPORTED] + // Backward lists backwards from cursor, including cursor [NOT SUPPORTED]. Backward = ListDirection(-1) - // Forward lists forwards from cursor, including cursor + // Forward lists forwards from cursor, including cursor. Forward = ListDirection(1) - // After lists forwards from cursor, without cursor + // After lists forwards from cursor, without cursor. After = ListDirection(2) ) -// ListOptions lists objects +// ListOptions lists objects. 
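The Nonce methods above give it a full set of marshalling hooks, so it round-trips through encoding/json as its base32 string form. A small sketch of that behaviour:

package example

import (
    "encoding/json"

    "storj.io/common/storj"
)

// roundTrip marshals a nonce to JSON (a quoted base32 string, no padding)
// and decodes it back, returning the recovered value.
func roundTrip(nonce storj.Nonce) (storj.Nonce, error) {
    data, err := json.Marshal(nonce)
    if err != nil {
        return storj.Nonce{}, err
    }

    var decoded storj.Nonce
    if err := json.Unmarshal(data, &decoded); err != nil {
        return storj.Nonce{}, err
    }
    return decoded, nil
}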
type ListOptions struct { Prefix Path Cursor Path // Cursor is relative to Prefix, full path is Prefix + Cursor @@ -27,7 +27,7 @@ type ListOptions struct { Limit int } -// ObjectList is a list of objects +// ObjectList is a list of objects. type ObjectList struct { Bucket string Prefix Path @@ -38,7 +38,7 @@ type ObjectList struct { Items []Object } -// NextPage returns options for listing the next page +// NextPage returns options for listing the next page. func (opts ListOptions) NextPage(list ObjectList) ListOptions { if !list.More || len(list.Items) == 0 { return ListOptions{} @@ -47,25 +47,27 @@ func (opts ListOptions) NextPage(list ObjectList) ListOptions { return ListOptions{ Prefix: opts.Prefix, Cursor: list.Items[len(list.Items)-1].Path, + Delimiter: opts.Delimiter, + Recursive: opts.Recursive, Direction: After, Limit: opts.Limit, } } -// BucketListOptions lists objects +// BucketListOptions lists objects. type BucketListOptions struct { Cursor string Direction ListDirection Limit int } -// BucketList is a list of buckets +// BucketList is a list of buckets. type BucketList struct { More bool Items []Bucket } -// NextPage returns options for listing the next page +// NextPage returns options for listing the next page. func (opts BucketListOptions) NextPage(list BucketList) BucketListOptions { if !list.More || len(list.Items) == 0 { return BucketListOptions{} diff --git a/vendor/storj.io/common/storj/node.go b/vendor/storj.io/common/storj/node.go index 5c05366a2..d0efc587b 100644 --- a/vendor/storj.io/common/storj/node.go +++ b/vendor/storj.io/common/storj/node.go @@ -23,13 +23,13 @@ var ( ErrVersion = errs.Class("node ID version error") ) -// NodeIDSize is the byte length of a NodeID +// NodeIDSize is the byte length of a NodeID. const NodeIDSize = sha256.Size -// NodeID is a unique node identifier +// NodeID is a unique node identifier. type NodeID [NodeIDSize]byte -// NodeIDList is a slice of NodeIDs (implements sort) +// NodeIDList is a slice of NodeIDs (implements sort). type NodeIDList []NodeID // NewVersionedID adds an identity version to a node ID. @@ -42,7 +42,7 @@ func NewVersionedID(id NodeID, version IDVersion) NodeID { } // NewVersionExt creates a new identity version certificate extension for the -// given identity version, +// given identity version. func NewVersionExt(version IDVersion) pkix.Extension { return pkix.Extension{ Id: extensions.IdentityVersionExtID, @@ -50,7 +50,7 @@ func NewVersionExt(version IDVersion) pkix.Extension { } } -// NodeIDFromString decodes a base58check encoded node id string +// NodeIDFromString decodes a base58check encoded node id string. func NodeIDFromString(s string) (NodeID, error) { idBytes, versionNumber, err := base58.CheckDecode(s) if err != nil { @@ -65,7 +65,7 @@ func NodeIDFromString(s string) (NodeID, error) { return NewVersionedID(unversionedID, version), nil } -// NodeIDsFromBytes converts a 2d byte slice into a list of nodes +// NodeIDsFromBytes converts a 2d byte slice into a list of nodes. func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) { var idErrs []error for _, idBytes := range b { @@ -84,7 +84,7 @@ func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) { return ids, nil } -// NodeIDFromBytes converts a byte slice into a node id +// NodeIDFromBytes converts a byte slice into a node id. 
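The NextPage change above is the one behavioural fix in this file: Delimiter and Recursive are now carried into the options for the following page, so a recursive listing stays recursive past the first page. A pagination sketch; listPage is a stand-in for whatever metainfo call performs a single listing and is not an API in this diff:

package example

import (
    "context"

    "storj.io/common/storj"
)

// listAll drains a paginated object listing using ListOptions.NextPage.
func listAll(ctx context.Context, listPage func(context.Context, storj.ListOptions) (storj.ObjectList, error)) ([]storj.Object, error) {
    opts := storj.ListOptions{
        Direction: storj.After,
        Recursive: true,
        Limit:     1000,
    }

    var all []storj.Object
    for {
        list, err := listPage(ctx, opts)
        if err != nil {
            return nil, err
        }
        all = append(all, list.Items...)
        if !list.More {
            return all, nil
        }
        // With the fix above, NextPage preserves Recursive (and Delimiter),
        // so later pages keep the same listing mode as the first one.
        opts = opts.NextPage(list)
    }
}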
func NodeIDFromBytes(b []byte) (NodeID, error) { bLen := len(b) if bLen != len(NodeID{}) { @@ -96,18 +96,18 @@ func NodeIDFromBytes(b []byte) (NodeID, error) { return id, nil } -// String returns NodeID as base58 encoded string with checksum and version bytes +// String returns NodeID as base58 encoded string with checksum and version bytes. func (id NodeID) String() string { unversionedID := id.unversioned() return base58.CheckEncode(unversionedID[:], byte(id.Version().Number)) } -// IsZero returns whether NodeID is unassigned +// IsZero returns whether NodeID is unassigned. func (id NodeID) IsZero() bool { return id == NodeID{} } -// Bytes returns raw bytes of the id +// Bytes returns raw bytes of the id. func (id NodeID) Bytes() []byte { return id[:] } // Less returns whether id is smaller than other in lexicographic order. @@ -122,7 +122,7 @@ func (id NodeID) Less(other NodeID) bool { return false } -// Version returns the version of the identity format +// Version returns the version of the identity format. func (id NodeID) Version() IDVersion { versionNumber := id.versionByte() if versionNumber == 0 { @@ -138,7 +138,7 @@ func (id NodeID) Version() IDVersion { return version } -// Difficulty returns the number of trailing zero bits in a node ID +// Difficulty returns the number of trailing zero bits in a node ID. func (id NodeID) Difficulty() (uint16, error) { idLen := len(id) var b byte @@ -161,18 +161,18 @@ func (id NodeID) Difficulty() (uint16, error) { return 0, ErrNodeID.New("difficulty matches id hash length: %d; hash (hex): % x", idLen, id) } -// Marshal serializes a node id +// Marshal serializes a node id. func (id NodeID) Marshal() ([]byte, error) { return id.Bytes(), nil } -// MarshalTo serializes a node ID into the passed byte slice +// MarshalTo serializes a node ID into the passed byte slice. func (id *NodeID) MarshalTo(data []byte) (n int, err error) { n = copy(data, id.Bytes()) return n, nil } -// Unmarshal deserializes a node ID +// Unmarshal deserializes a node ID. func (id *NodeID) Unmarshal(data []byte) error { var err error *id, err = NodeIDFromBytes(data) @@ -192,22 +192,22 @@ func (id NodeID) unversioned() NodeID { return unversionedID } -// Size returns the length of a node ID (implements gogo's custom type interface) +// Size returns the length of a node ID (implements gogo's custom type interface). func (id *NodeID) Size() int { return len(id) } -// MarshalJSON serializes a node ID to a json string as bytes +// MarshalJSON serializes a node ID to a json string as bytes. func (id NodeID) MarshalJSON() ([]byte, error) { return []byte(`"` + id.String() + `"`), nil } -// Value converts a NodeID to a database field +// Value converts a NodeID to a database field. func (id NodeID) Value() (driver.Value, error) { return id.Bytes(), nil } -// Scan extracts a NodeID from a database field +// Scan extracts a NodeID from a database field. func (id *NodeID) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { @@ -218,7 +218,7 @@ func (id *NodeID) Scan(src interface{}) (err error) { return err } -// UnmarshalJSON deserializes a json string (as bytes) to a node ID +// UnmarshalJSON deserializes a json string (as bytes) to a node ID. func (id *NodeID) UnmarshalJSON(data []byte) error { var unquoted string err := json.Unmarshal(data, &unquoted) @@ -233,7 +233,7 @@ func (id *NodeID) UnmarshalJSON(data []byte) error { return nil } -// Strings returns a string slice of the node IDs +// Strings returns a string slice of the node IDs. 
func (n NodeIDList) Strings() []string { var strings []string for _, nid := range n { @@ -242,7 +242,7 @@ func (n NodeIDList) Strings() []string { return strings } -// Bytes returns a 2d byte slice of the node IDs +// Bytes returns a 2d byte slice of the node IDs. func (n NodeIDList) Bytes() (idsBytes [][]byte) { for _, nid := range n { idsBytes = append(idsBytes, nid.Bytes()) @@ -250,11 +250,11 @@ func (n NodeIDList) Bytes() (idsBytes [][]byte) { return idsBytes } -// Len implements sort.Interface.Len() +// Len implements sort.Interface.Len(). func (n NodeIDList) Len() int { return len(n) } -// Swap implements sort.Interface.Swap() +// Swap implements sort.Interface.Swap(). func (n NodeIDList) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -// Less implements sort.Interface.Less() +// Less implements sort.Interface.Less(). func (n NodeIDList) Less(i, j int) bool { return n[i].Less(n[j]) } diff --git a/vendor/storj.io/common/storj/nodeurl.go b/vendor/storj.io/common/storj/nodeurl.go index 0074fef1f..f68922343 100644 --- a/vendor/storj.io/common/storj/nodeurl.go +++ b/vendor/storj.io/common/storj/nodeurl.go @@ -70,7 +70,7 @@ func (url NodeURL) IsZero() bool { return url == NodeURL{} } -// String converts NodeURL to a string +// String converts NodeURL to a string. func (url NodeURL) String() string { if url.ID.IsZero() { return url.Address @@ -78,7 +78,7 @@ func (url NodeURL) String() string { return url.ID.String() + "@" + url.Address } -// Set implements flag.Value interface +// Set implements flag.Value interface. func (url *NodeURL) Set(s string) error { parsed, err := ParseNodeURL(s) if err != nil { @@ -89,13 +89,13 @@ func (url *NodeURL) Set(s string) error { return nil } -// Type implements pflag.Value +// Type implements pflag.Value. func (NodeURL) Type() string { return "storj.NodeURL" } // NodeURLs defines a comma delimited flag for defining a list node url-s. type NodeURLs []NodeURL -// ParseNodeURLs parses comma delimited list of node urls +// ParseNodeURLs parses comma delimited list of node urls. func ParseNodeURLs(s string) (NodeURLs, error) { var urls NodeURLs if s == "" { @@ -113,7 +113,7 @@ func ParseNodeURLs(s string) (NodeURLs, error) { return urls, nil } -// String converts NodeURLs to a string +// String converts NodeURLs to a string. func (urls NodeURLs) String() string { var xs []string for _, u := range urls { @@ -122,7 +122,7 @@ func (urls NodeURLs) String() string { return strings.Join(xs, ",") } -// Set implements flag.Value interface +// Set implements flag.Value interface. func (urls *NodeURLs) Set(s string) error { parsed, err := ParseNodeURLs(s) if err != nil { @@ -133,5 +133,5 @@ func (urls *NodeURLs) Set(s string) error { return nil } -// Type implements pflag.Value +// Type implements pflag.Value. func (NodeURLs) Type() string { return "storj.NodeURLs" } diff --git a/vendor/storj.io/common/storj/object.go b/vendor/storj.io/common/storj/object.go index b0cc2d5e7..1f71a9583 100644 --- a/vendor/storj.io/common/storj/object.go +++ b/vendor/storj.io/common/storj/object.go @@ -10,14 +10,14 @@ import ( ) var ( - // ErrNoPath is an error class for using empty path + // ErrNoPath is an error class for using empty path. ErrNoPath = errs.Class("no path specified") - // ErrObjectNotFound is an error class for non-existing object + // ErrObjectNotFound is an error class for non-existing object. ErrObjectNotFound = errs.Class("object not found") ) -// Object contains information about a specific object +// Object contains information about a specific object. 
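NodeIDList's Len/Swap/Less above implement sort.Interface, so a list of node IDs can be handed straight to the sort package. A tiny sketch; the literal IDs are zero-padded placeholders, not real node identities:

package example

import (
    "sort"

    "storj.io/common/storj"
)

// sortedIDs orders a NodeIDList lexicographically via NodeID.Less.
func sortedIDs() storj.NodeIDList {
    ids := storj.NodeIDList{
        {0x03}, // NodeID is a [32]byte array, so these are
        {0x01}, // zero-padded illustrative values only
        {0x02},
    }
    sort.Sort(ids)
    return ids
}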
type Object struct { Version uint32 Bucket Bucket @@ -34,7 +34,7 @@ type Object struct { Stream } -// ObjectInfo contains information about a specific object +// ObjectInfo contains information about a specific object. type ObjectInfo struct { Version uint32 Bucket string @@ -53,7 +53,7 @@ type ObjectInfo struct { Stream } -// Stream is information about an object stream +// Stream is information about an object stream. type Stream struct { ID StreamID @@ -76,15 +76,16 @@ type Stream struct { LastSegment LastSegment // TODO: remove } -// LastSegment contains info about last segment -// TODO: remove +// LastSegment contains info about last segment. +// +// TODO: remove. type LastSegment struct { Size int64 EncryptedKeyNonce Nonce EncryptedKey EncryptedPrivateKey } -// Segment is full segment information +// Segment is full segment information. type Segment struct { Index int64 // Size is the size of the content in bytes @@ -101,7 +102,7 @@ type Segment struct { EncryptedKey EncryptedPrivateKey } -// Piece is information where a piece is located +// Piece is information where a piece is located. type Piece struct { Number byte Location NodeID diff --git a/vendor/storj.io/common/storj/object_list_item.go b/vendor/storj.io/common/storj/object_list_item.go index c6d051d8c..9387f5920 100644 --- a/vendor/storj.io/common/storj/object_list_item.go +++ b/vendor/storj.io/common/storj/object_list_item.go @@ -7,7 +7,7 @@ import ( "time" ) -// ObjectListItem represents listed object +// ObjectListItem represents listed object. type ObjectListItem struct { EncryptedPath []byte Version int32 diff --git a/vendor/storj.io/common/storj/path.go b/vendor/storj.io/common/storj/path.go index 0b44174e1..da4a17090 100644 --- a/vendor/storj.io/common/storj/path.go +++ b/vendor/storj.io/common/storj/path.go @@ -7,15 +7,15 @@ import ( "strings" ) -// Path represents a object path +// Path represents a object path. type Path = string -// SplitPath splits path into a slice of path components +// SplitPath splits path into a slice of path components. func SplitPath(path Path) []string { return strings.Split(path, "/") } -// JoinPaths concatenates paths to a new single path +// JoinPaths concatenates paths to a new single path. func JoinPaths(paths ...Path) Path { return strings.Join(paths, "/") } diff --git a/vendor/storj.io/common/storj/pieceid.go b/vendor/storj.io/common/storj/pieceid.go index bee444757..dd264babd 100644 --- a/vendor/storj.io/common/storj/pieceid.go +++ b/vendor/storj.io/common/storj/pieceid.go @@ -15,16 +15,16 @@ import ( "github.com/zeebo/errs" ) -// ErrPieceID is used when something goes wrong with a piece ID +// ErrPieceID is used when something goes wrong with a piece ID. var ErrPieceID = errs.Class("piece ID error") -// pieceIDEncoding is base32 without padding +// pieceIDEncoding is base32 without padding. var pieceIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) -// PieceID is the unique identifier for pieces +// PieceID is the unique identifier for pieces. type PieceID [32]byte -// NewPieceID creates a piece ID +// NewPieceID creates a piece ID. func NewPieceID() PieceID { var id PieceID @@ -36,7 +36,7 @@ func NewPieceID() PieceID { return id } -// PieceIDFromString decodes a hex encoded piece ID string +// PieceIDFromString decodes a hex encoded piece ID string. 
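SplitPath and JoinPaths above are the only helpers the storj package offers for Path values: split on "/" and join with "/". A one-function sketch of combining them:

package example

import (
    "storj.io/common/storj"
)

// rebase keeps only the last component of path and places it under prefix,
// e.g. rebase("backup", "a/b/c") == "backup/c".
func rebase(prefix, path storj.Path) storj.Path {
    components := storj.SplitPath(path)
    return storj.JoinPaths(prefix, components[len(components)-1])
}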
func PieceIDFromString(s string) (PieceID, error) { idBytes, err := pieceIDEncoding.DecodeString(s) if err != nil { @@ -45,7 +45,7 @@ func PieceIDFromString(s string) (PieceID, error) { return PieceIDFromBytes(idBytes) } -// PieceIDFromBytes converts a byte slice into a piece ID +// PieceIDFromBytes converts a byte slice into a piece ID. func PieceIDFromBytes(b []byte) (PieceID, error) { if len(b) != len(PieceID{}) { return PieceID{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(PieceID{})) @@ -56,18 +56,18 @@ func PieceIDFromBytes(b []byte) (PieceID, error) { return id, nil } -// IsZero returns whether piece ID is unassigned +// IsZero returns whether piece ID is unassigned. func (id PieceID) IsZero() bool { return id == PieceID{} } -// String representation of the piece ID +// String representation of the piece ID. func (id PieceID) String() string { return pieceIDEncoding.EncodeToString(id.Bytes()) } -// Bytes returns bytes of the piece ID +// Bytes returns bytes of the piece ID. func (id PieceID) Bytes() []byte { return id[:] } -// Derive a new PieceID from the current piece ID, the given storage node ID and piece number +// Derive a new PieceID from the current piece ID, the given storage node ID and piece number. func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID { // TODO: should the secret / content be swapped? mac := hmac.New(sha512.New, id.Bytes()) @@ -80,35 +80,35 @@ func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID { return derived } -// Marshal serializes a piece ID +// Marshal serializes a piece ID. func (id PieceID) Marshal() ([]byte, error) { return id.Bytes(), nil } -// MarshalTo serializes a piece ID into the passed byte slice +// MarshalTo serializes a piece ID into the passed byte slice. func (id *PieceID) MarshalTo(data []byte) (n int, err error) { n = copy(data, id.Bytes()) return n, nil } -// Unmarshal deserializes a piece ID +// Unmarshal deserializes a piece ID. func (id *PieceID) Unmarshal(data []byte) error { var err error *id, err = PieceIDFromBytes(data) return err } -// Size returns the length of a piece ID (implements gogo's custom type interface) +// Size returns the length of a piece ID (implements gogo's custom type interface). func (id *PieceID) Size() int { return len(id) } -// MarshalJSON serializes a piece ID to a json string as bytes +// MarshalJSON serializes a piece ID to a json string as bytes. func (id PieceID) MarshalJSON() ([]byte, error) { return []byte(`"` + id.String() + `"`), nil } -// UnmarshalJSON deserializes a json string (as bytes) to a piece ID +// UnmarshalJSON deserializes a json string (as bytes) to a piece ID. func (id *PieceID) UnmarshalJSON(data []byte) error { var unquoted string err := json.Unmarshal(data, &unquoted) @@ -123,12 +123,12 @@ func (id *PieceID) UnmarshalJSON(data []byte) error { return nil } -// Value set a PieceID to a database field +// Value set a PieceID to a database field. func (id PieceID) Value() (driver.Value, error) { return id.Bytes(), nil } -// Scan extracts a PieceID from a database field +// Scan extracts a PieceID from a database field. 
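Derive, documented above, is how per-node piece IDs are produced: the segment's root piece ID is combined with the storage node ID and piece number through the HMAC-SHA512 construction shown in the hunk. A sketch of fanning a root piece ID out over a node list:

package example

import (
    "storj.io/common/storj"
)

// derivePieceIDs computes one piece ID per storage node from a root piece ID,
// using the piece's index in the node list as the piece number.
func derivePieceIDs(rootPieceID storj.PieceID, nodes []storj.NodeID) []storj.PieceID {
    derived := make([]storj.PieceID, 0, len(nodes))
    for pieceNum, nodeID := range nodes {
        derived = append(derived, rootPieceID.Derive(nodeID, int32(pieceNum)))
    }
    return derived
}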
func (id *PieceID) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { diff --git a/vendor/storj.io/common/storj/piecekey.go b/vendor/storj.io/common/storj/piecekey.go index 6dd5f917a..f74ff7fce 100644 --- a/vendor/storj.io/common/storj/piecekey.go +++ b/vendor/storj.io/common/storj/piecekey.go @@ -10,20 +10,20 @@ import ( "golang.org/x/crypto/ed25519" ) -// ErrPieceKey is used when something goes wrong with a piece key +// ErrPieceKey is used when something goes wrong with a piece key. var ErrPieceKey = errs.Class("piece key error") -// PiecePublicKey is the unique identifier for pieces +// PiecePublicKey is the unique identifier for pieces. type PiecePublicKey struct { pub ed25519.PublicKey } -// PiecePrivateKey is the unique identifier for pieces +// PiecePrivateKey is the unique identifier for pieces. type PiecePrivateKey struct { priv ed25519.PrivateKey } -// NewPieceKey creates a piece key pair +// NewPieceKey creates a piece key pair. func NewPieceKey() (PiecePublicKey, PiecePrivateKey, error) { pub, priv, err := ed25519.GenerateKey(nil) @@ -65,10 +65,10 @@ func (key PiecePublicKey) Verify(data, signature []byte) error { return nil } -// Bytes returns bytes of the piece public key +// Bytes returns bytes of the piece public key. func (key PiecePublicKey) Bytes() []byte { return key.pub[:] } -// Bytes returns bytes of the piece private key +// Bytes returns bytes of the piece private key. func (key PiecePrivateKey) Bytes() []byte { return key.priv[:] } // IsZero returns whether the key is empty. @@ -77,25 +77,25 @@ func (key PiecePublicKey) IsZero() bool { return len(key.pub) == 0 } // IsZero returns whether the key is empty. func (key PiecePrivateKey) IsZero() bool { return len(key.priv) == 0 } -// Marshal serializes a piece public key +// Marshal serializes a piece public key. func (key PiecePublicKey) Marshal() ([]byte, error) { return key.Bytes(), nil } -// Marshal serializes a piece private key +// Marshal serializes a piece private key. func (key PiecePrivateKey) Marshal() ([]byte, error) { return key.Bytes(), nil } -// MarshalTo serializes a piece public key into the passed byte slice +// MarshalTo serializes a piece public key into the passed byte slice. func (key *PiecePublicKey) MarshalTo(data []byte) (n int, err error) { n = copy(data, key.Bytes()) return n, nil } -// MarshalTo serializes a piece private key into the passed byte slice +// MarshalTo serializes a piece private key into the passed byte slice. func (key *PiecePrivateKey) MarshalTo(data []byte) (n int, err error) { n = copy(data, key.Bytes()) return n, nil } -// Unmarshal deserializes a piece public key +// Unmarshal deserializes a piece public key. func (key *PiecePublicKey) Unmarshal(data []byte) error { // allow empty keys if len(data) == 0 { @@ -107,7 +107,7 @@ func (key *PiecePublicKey) Unmarshal(data []byte) error { return err } -// Unmarshal deserializes a piece private key +// Unmarshal deserializes a piece private key. func (key *PiecePrivateKey) Unmarshal(data []byte) error { // allow empty keys if len(data) == 0 { @@ -122,21 +122,21 @@ func (key *PiecePrivateKey) Unmarshal(data []byte) error { return err } -// Size returns the length of a piece public key (implements gogo's custom type interface) +// Size returns the length of a piece public key (implements gogo's custom type interface). 
func (key *PiecePublicKey) Size() int { return len(key.pub) } -// Size returns the length of a piece private key (implements gogo's custom type interface) +// Size returns the length of a piece private key (implements gogo's custom type interface). func (key *PiecePrivateKey) Size() int { return len(key.priv) } -// Value set a PiecePublicKey to a database field +// Value set a PiecePublicKey to a database field. func (key PiecePublicKey) Value() (driver.Value, error) { return key.Bytes(), nil } -// Value set a PiecePrivateKey to a database field +// Value set a PiecePrivateKey to a database field. func (key PiecePrivateKey) Value() (driver.Value, error) { return key.Bytes(), nil } -// Scan extracts a PiecePublicKey from a database field +// Scan extracts a PiecePublicKey from a database field. func (key *PiecePublicKey) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { @@ -147,7 +147,7 @@ func (key *PiecePublicKey) Scan(src interface{}) (err error) { return err } -// Scan extracts a PiecePrivateKey from a database field +// Scan extracts a PiecePrivateKey from a database field. func (key *PiecePrivateKey) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { diff --git a/vendor/storj.io/common/storj/redundancy.go b/vendor/storj.io/common/storj/redundancy.go index ad049a47f..015feb6b6 100644 --- a/vendor/storj.io/common/storj/redundancy.go +++ b/vendor/storj.io/common/storj/redundancy.go @@ -3,7 +3,7 @@ package storj -// RedundancyScheme specifies the parameters and the algorithm for redundancy +// RedundancyScheme specifies the parameters and the algorithm for redundancy. type RedundancyScheme struct { // Algorithm determines the algorithm to be used for redundancy. Algorithm RedundancyAlgorithm @@ -25,7 +25,7 @@ type RedundancyScheme struct { TotalShares int16 } -// IsZero returns true if no field in the struct is set to non-zero value +// IsZero returns true if no field in the struct is set to non-zero value. func (scheme RedundancyScheme) IsZero() bool { return scheme == (RedundancyScheme{}) } @@ -59,10 +59,10 @@ func (scheme RedundancyScheme) DownloadNodes() int32 { return needed } -// RedundancyAlgorithm is the algorithm used for redundancy +// RedundancyAlgorithm is the algorithm used for redundancy. type RedundancyAlgorithm byte -// List of supported redundancy algorithms +// List of supported redundancy algorithms. const ( InvalidRedundancyAlgorithm = RedundancyAlgorithm(iota) ReedSolomon diff --git a/vendor/storj.io/common/storj/segment.go b/vendor/storj.io/common/storj/segment.go index 520450cbb..54fe1afa8 100644 --- a/vendor/storj.io/common/storj/segment.go +++ b/vendor/storj.io/common/storj/segment.go @@ -3,18 +3,18 @@ package storj -// SegmentPosition segment position in object +// SegmentPosition segment position in object. type SegmentPosition struct { PartNumber int32 Index int32 } -// SegmentListItem represents listed segment +// SegmentListItem represents listed segment. type SegmentListItem struct { Position SegmentPosition } -// SegmentDownloadInfo represents segment download information inline/remote +// SegmentDownloadInfo represents segment download information inline/remote. type SegmentDownloadInfo struct { SegmentID SegmentID Size int64 @@ -25,7 +25,7 @@ type SegmentDownloadInfo struct { SegmentEncryption SegmentEncryption } -// SegmentEncryption represents segment encryption key and nonce +// SegmentEncryption represents segment encryption key and nonce. 
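RedundancyScheme above carries the erasure-coding parameters, and DownloadNodes reports how many nodes a download should request pieces from. A sketch of building and querying one; ShareSize and RequiredShares are upstream fields elided by this hunk's context, and all the numbers are illustrative:

package example

import (
    "storj.io/common/storj"
)

// exampleScheme returns an illustrative Reed-Solomon scheme. The field values
// are placeholders, not recommended settings.
func exampleScheme() storj.RedundancyScheme {
    return storj.RedundancyScheme{
        Algorithm:      storj.ReedSolomon,
        ShareSize:      256, // assumed field, not shown in this hunk
        RequiredShares: 29,  // assumed field, not shown in this hunk
        TotalShares:    80,
    }
}

// downloadFanout returns how many nodes to contact for a download, or zero
// for an unset scheme.
func downloadFanout(scheme storj.RedundancyScheme) int32 {
    if scheme.IsZero() {
        return 0
    }
    return scheme.DownloadNodes()
}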
type SegmentEncryption struct { EncryptedKeyNonce Nonce EncryptedKey EncryptedPrivateKey diff --git a/vendor/storj.io/common/storj/segmentid.go b/vendor/storj.io/common/storj/segmentid.go index 8d9250e65..9754249bd 100644 --- a/vendor/storj.io/common/storj/segmentid.go +++ b/vendor/storj.io/common/storj/segmentid.go @@ -9,16 +9,16 @@ import ( "github.com/zeebo/errs" ) -// ErrSegmentID is used when something goes wrong with a segment ID +// ErrSegmentID is used when something goes wrong with a segment ID. var ErrSegmentID = errs.Class("segment ID error") -// segmentIDEncoding is base32 without padding +// segmentIDEncoding is base32 without padding. var segmentIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) -// SegmentID is the unique identifier for segment related to object +// SegmentID is the unique identifier for segment related to object. type SegmentID []byte -// SegmentIDFromString decodes an base32 encoded +// SegmentIDFromString decodes an base32 encoded. func SegmentIDFromString(s string) (SegmentID, error) { idBytes, err := segmentIDEncoding.DecodeString(s) if err != nil { @@ -27,7 +27,7 @@ func SegmentIDFromString(s string) (SegmentID, error) { return SegmentIDFromBytes(idBytes) } -// SegmentIDFromBytes converts a byte slice into a segment ID +// SegmentIDFromBytes converts a byte slice into a segment ID. func SegmentIDFromBytes(b []byte) (SegmentID, error) { // return error will be used in future implementation id := make([]byte, len(b)) @@ -35,45 +35,45 @@ func SegmentIDFromBytes(b []byte) (SegmentID, error) { return id, nil } -// IsZero returns whether segment ID is unassigned +// IsZero returns whether segment ID is unassigned. func (id SegmentID) IsZero() bool { return len(id) == 0 } -// String representation of the segment ID +// String representation of the segment ID. func (id SegmentID) String() string { return segmentIDEncoding.EncodeToString(id.Bytes()) } -// Bytes returns bytes of the segment ID +// Bytes returns bytes of the segment ID. func (id SegmentID) Bytes() []byte { return id[:] } -// Marshal serializes a segment ID (implements gogo's custom type interface) +// Marshal serializes a segment ID (implements gogo's custom type interface). func (id SegmentID) Marshal() ([]byte, error) { return id.Bytes(), nil } -// MarshalTo serializes a segment ID into the passed byte slice (implements gogo's custom type interface) +// MarshalTo serializes a segment ID into the passed byte slice (implements gogo's custom type interface). func (id *SegmentID) MarshalTo(data []byte) (n int, err error) { return copy(data, id.Bytes()), nil } -// Unmarshal deserializes a segment ID (implements gogo's custom type interface) +// Unmarshal deserializes a segment ID (implements gogo's custom type interface). func (id *SegmentID) Unmarshal(data []byte) error { var err error *id, err = SegmentIDFromBytes(data) return err } -// Size returns the length of a segment ID (implements gogo's custom type interface) +// Size returns the length of a segment ID (implements gogo's custom type interface). func (id SegmentID) Size() int { return len(id) } -// MarshalJSON serializes a segment ID to a json string as bytes (implements gogo's custom type interface) +// MarshalJSON serializes a segment ID to a json string as bytes (implements gogo's custom type interface). 
func (id SegmentID) MarshalJSON() ([]byte, error) { return []byte(`"` + id.String() + `"`), nil } -// UnmarshalJSON deserializes a json string (as bytes) to a segment ID (implements gogo's custom type interface) +// UnmarshalJSON deserializes a json string (as bytes) to a segment ID (implements gogo's custom type interface). func (id *SegmentID) UnmarshalJSON(data []byte) error { var err error *id, err = SegmentIDFromString(string(data)) diff --git a/vendor/storj.io/common/storj/serialnumber.go b/vendor/storj.io/common/storj/serialnumber.go index 525a742f0..1deef724d 100644 --- a/vendor/storj.io/common/storj/serialnumber.go +++ b/vendor/storj.io/common/storj/serialnumber.go @@ -10,16 +10,16 @@ import ( "github.com/zeebo/errs" ) -// ErrSerialNumber is used when something goes wrong with a serial number +// ErrSerialNumber is used when something goes wrong with a serial number. var ErrSerialNumber = errs.Class("serial number error") -// serialNumberEncoding is base32 without padding +// serialNumberEncoding is base32 without padding. var serialNumberEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) -// SerialNumber is the unique identifier for pieces +// SerialNumber is the unique identifier for pieces. type SerialNumber [16]byte -// SerialNumberFromString decodes an base32 encoded +// SerialNumberFromString decodes an base32 encoded. func SerialNumberFromString(s string) (SerialNumber, error) { idBytes, err := serialNumberEncoding.DecodeString(s) if err != nil { @@ -28,7 +28,7 @@ func SerialNumberFromString(s string) (SerialNumber, error) { return SerialNumberFromBytes(idBytes) } -// SerialNumberFromBytes converts a byte slice into a serial number +// SerialNumberFromBytes converts a byte slice into a serial number. func SerialNumberFromBytes(b []byte) (SerialNumber, error) { if len(b) != len(SerialNumber{}) { return SerialNumber{}, ErrSerialNumber.New("not enough bytes to make a serial number; have %d, need %d", len(b), len(NodeID{})) @@ -39,7 +39,7 @@ func SerialNumberFromBytes(b []byte) (SerialNumber, error) { return id, nil } -// IsZero returns whether serial number is unassigned +// IsZero returns whether serial number is unassigned. func (id SerialNumber) IsZero() bool { return id == SerialNumber{} } @@ -56,41 +56,41 @@ func (id SerialNumber) Less(other SerialNumber) bool { return false } -// String representation of the serial number +// String representation of the serial number. func (id SerialNumber) String() string { return serialNumberEncoding.EncodeToString(id.Bytes()) } -// Bytes returns bytes of the serial number +// Bytes returns bytes of the serial number. func (id SerialNumber) Bytes() []byte { return id[:] } -// Marshal serializes a serial number +// Marshal serializes a serial number. func (id SerialNumber) Marshal() ([]byte, error) { return id.Bytes(), nil } -// MarshalTo serializes a serial number into the passed byte slice +// MarshalTo serializes a serial number into the passed byte slice. func (id *SerialNumber) MarshalTo(data []byte) (n int, err error) { n = copy(data, id.Bytes()) return n, nil } -// Unmarshal deserializes a serial number +// Unmarshal deserializes a serial number. func (id *SerialNumber) Unmarshal(data []byte) error { var err error *id, err = SerialNumberFromBytes(data) return err } -// Size returns the length of a serial number (implements gogo's custom type interface) +// Size returns the length of a serial number (implements gogo's custom type interface). 
func (id *SerialNumber) Size() int { return len(id) } -// MarshalJSON serializes a serial number to a json string as bytes +// MarshalJSON serializes a serial number to a json string as bytes. func (id SerialNumber) MarshalJSON() ([]byte, error) { return []byte(`"` + id.String() + `"`), nil } -// UnmarshalJSON deserializes a json string (as bytes) to a serial number +// UnmarshalJSON deserializes a json string (as bytes) to a serial number. func (id *SerialNumber) UnmarshalJSON(data []byte) error { var err error *id, err = SerialNumberFromString(string(data)) @@ -100,12 +100,12 @@ func (id *SerialNumber) UnmarshalJSON(data []byte) error { return nil } -// Value set a SerialNumber to a database field +// Value set a SerialNumber to a database field. func (id SerialNumber) Value() (driver.Value, error) { return id.Bytes(), nil } -// Scan extracts a SerialNumber from a database field +// Scan extracts a SerialNumber from a database field. func (id *SerialNumber) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { diff --git a/vendor/storj.io/common/storj/streamid.go b/vendor/storj.io/common/storj/streamid.go index 91c3a6bc9..2bd6ce5bd 100644 --- a/vendor/storj.io/common/storj/streamid.go +++ b/vendor/storj.io/common/storj/streamid.go @@ -10,16 +10,16 @@ import ( "github.com/zeebo/errs" ) -// ErrStreamID is used when something goes wrong with a stream ID +// ErrStreamID is used when something goes wrong with a stream ID. var ErrStreamID = errs.Class("stream ID error") -// streamIDEncoding is base32 without padding +// streamIDEncoding is base32 without padding. var streamIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) -// StreamID is the unique identifier for stream related to object +// StreamID is the unique identifier for stream related to object. type StreamID []byte -// StreamIDFromString decodes an base32 encoded +// StreamIDFromString decodes an base32 encoded. func StreamIDFromString(s string) (StreamID, error) { idBytes, err := streamIDEncoding.DecodeString(s) if err != nil { @@ -28,53 +28,53 @@ func StreamIDFromString(s string) (StreamID, error) { return StreamIDFromBytes(idBytes) } -// StreamIDFromBytes converts a byte slice into a stream ID +// StreamIDFromBytes converts a byte slice into a stream ID. func StreamIDFromBytes(b []byte) (StreamID, error) { id := make([]byte, len(b)) copy(id, b) return id, nil } -// IsZero returns whether stream ID is unassigned +// IsZero returns whether stream ID is unassigned. func (id StreamID) IsZero() bool { return len(id) == 0 } -// String representation of the stream ID +// String representation of the stream ID. func (id StreamID) String() string { return streamIDEncoding.EncodeToString(id.Bytes()) } -// Bytes returns bytes of the stream ID +// Bytes returns bytes of the stream ID. func (id StreamID) Bytes() []byte { return id[:] } -// Marshal serializes a stream ID +// Marshal serializes a stream ID. func (id StreamID) Marshal() ([]byte, error) { return id.Bytes(), nil } -// MarshalTo serializes a stream ID into the passed byte slice +// MarshalTo serializes a stream ID into the passed byte slice. func (id *StreamID) MarshalTo(data []byte) (n int, err error) { n = copy(data, id.Bytes()) return n, nil } -// Unmarshal deserializes a stream ID +// Unmarshal deserializes a stream ID. 
func (id *StreamID) Unmarshal(data []byte) error { var err error *id, err = StreamIDFromBytes(data) return err } -// Size returns the length of a stream ID (implements gogo's custom type interface) +// Size returns the length of a stream ID (implements gogo's custom type interface). func (id StreamID) Size() int { return len(id) } -// MarshalJSON serializes a stream ID to a json string as bytes +// MarshalJSON serializes a stream ID to a json string as bytes. func (id StreamID) MarshalJSON() ([]byte, error) { return []byte(`"` + id.String() + `"`), nil } -// UnmarshalJSON deserializes a json string (as bytes) to a stream ID +// UnmarshalJSON deserializes a json string (as bytes) to a stream ID. func (id *StreamID) UnmarshalJSON(data []byte) error { var err error *id, err = StreamIDFromString(string(data)) @@ -84,12 +84,12 @@ func (id *StreamID) UnmarshalJSON(data []byte) error { return nil } -// Value set a stream ID to a database field +// Value set a stream ID to a database field. func (id StreamID) Value() (driver.Value, error) { return id.Bytes(), nil } -// Scan extracts a stream ID from a database field +// Scan extracts a stream ID from a database field. func (id *StreamID) Scan(src interface{}) (err error) { b, ok := src.([]byte) if !ok { diff --git a/vendor/storj.io/common/sync2/copy.go b/vendor/storj.io/common/sync2/copy.go index 360f97d4e..f7ea15545 100644 --- a/vendor/storj.io/common/sync2/copy.go +++ b/vendor/storj.io/common/sync2/copy.go @@ -16,7 +16,7 @@ type readerFunc func(p []byte) (n int, err error) func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } -// Copy implements copying with cancellation +// Copy implements copying with cancellation. func Copy(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { defer mon.Task()(&ctx)(&err) written, err = io.Copy(dst, readerFunc(func(p []byte) (int, error) { diff --git a/vendor/storj.io/common/sync2/cycle.go b/vendor/storj.io/common/sync2/cycle.go index 058245518..d72503a5b 100644 --- a/vendor/storj.io/common/sync2/cycle.go +++ b/vendor/storj.io/common/sync2/cycle.go @@ -169,7 +169,7 @@ func (cycle *Cycle) Close() { close(cycle.control) } -// sendControl sends a control message +// sendControl sends a control message. func (cycle *Cycle) sendControl(message interface{}) { cycle.initialize() select { @@ -178,7 +178,7 @@ func (cycle *Cycle) sendControl(message interface{}) { } } -// Stop stops the cycle permanently +// Stop stops the cycle permanently. func (cycle *Cycle) Stop() { cycle.initialize() if atomic.CompareAndSwapInt32(&cycle.stopsent, 0, 1) { diff --git a/vendor/storj.io/common/sync2/fence.go b/vendor/storj.io/common/sync2/fence.go index c6539f8c2..655899880 100644 --- a/vendor/storj.io/common/sync2/fence.go +++ b/vendor/storj.io/common/sync2/fence.go @@ -17,14 +17,14 @@ type Fence struct { done chan struct{} } -// init sets up the initial lock into wait +// init sets up the initial lock into wait. func (fence *Fence) init() { fence.setup.Do(func() { fence.done = make(chan struct{}) }) } -// Release releases everyone from Wait +// Release releases everyone from Wait. 
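sync2.Copy above wraps io.Copy so that the source is re-checked against the context on every read, making an otherwise unbounded copy cancellable. A short sketch with a deadline:

package example

import (
    "context"
    "io"
    "time"

    "storj.io/common/sync2"
)

// copyWithDeadline copies src into dst but gives up after 30 seconds, relying
// on Copy's per-read context check to stop early.
func copyWithDeadline(dst io.Writer, src io.Reader) (int64, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    return sync2.Copy(ctx, dst, src)
}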
func (fence *Fence) Release() { fence.init() fence.release.Do(func() { close(fence.done) }) diff --git a/vendor/storj.io/common/sync2/io.go b/vendor/storj.io/common/sync2/io.go index 06ce0f000..cd97bbdd3 100644 --- a/vendor/storj.io/common/sync2/io.go +++ b/vendor/storj.io/common/sync2/io.go @@ -9,32 +9,32 @@ import ( "sync/atomic" ) -// ReadAtWriteAtCloser implements all io.ReaderAt, io.WriterAt and io.Closer +// ReadAtWriteAtCloser implements all io.ReaderAt, io.WriterAt and io.Closer. type ReadAtWriteAtCloser interface { io.ReaderAt io.WriterAt io.Closer } -// PipeWriter allows closing the writer with an error +// PipeWriter allows closing the writer with an error. type PipeWriter interface { io.WriteCloser CloseWithError(reason error) error } -// PipeReader allows closing the reader with an error +// PipeReader allows closing the reader with an error. type PipeReader interface { io.ReadCloser CloseWithError(reason error) error } -// memory implements ReadAtWriteAtCloser on a memory buffer +// memory implements ReadAtWriteAtCloser on a memory buffer. type memory []byte -// Size returns size of memory buffer +// Size returns size of memory buffer. func (memory memory) Size() int { return len(memory) } -// ReadAt implements io.ReaderAt methods +// ReadAt implements io.ReaderAt methods. func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) { if at > int64(len(memory)) { return 0, io.ErrClosedPipe @@ -43,7 +43,7 @@ func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) { return amount, nil } -// WriteAt implements io.WriterAt methods +// WriteAt implements io.WriterAt methods. func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) { if at > int64(len(memory)) { return 0, io.ErrClosedPipe @@ -52,27 +52,27 @@ func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) { return amount, nil } -// Close implements io.Closer implementation +// Close implements io.Closer implementation. func (memory memory) Close() error { return nil } -// offsetFile implements ReadAt, WriteAt offset to the file with reference counting +// offsetFile implements ReadAt, WriteAt offset to the file with reference counting. type offsetFile struct { file *os.File offset int64 open *int64 // number of handles open } -// ReadAt implements io.ReaderAt methods +// ReadAt implements io.ReaderAt methods. func (file offsetFile) ReadAt(data []byte, at int64) (amount int, err error) { return file.file.ReadAt(data, file.offset+at) } -// WriteAt implements io.WriterAt methods +// WriteAt implements io.WriterAt methods. func (file offsetFile) WriteAt(data []byte, at int64) (amount int, err error) { return file.file.WriteAt(data, file.offset+at) } -// Close implements io.Closer methods +// Close implements io.Closer methods. func (file offsetFile) Close() error { if atomic.AddInt64(file.open, -1) == 0 { return file.file.Close() diff --git a/vendor/storj.io/common/sync2/pipe.go b/vendor/storj.io/common/sync2/pipe.go index 5e639d65e..24a274123 100644 --- a/vendor/storj.io/common/sync2/pipe.go +++ b/vendor/storj.io/common/sync2/pipe.go @@ -11,7 +11,7 @@ import ( "sync" ) -// pipe is a io.Reader/io.Writer pipe backed by ReadAtWriteAtCloser +// pipe is a io.Reader/io.Writer pipe backed by ReadAtWriteAtCloser. type pipe struct { noCopy noCopy // nolint: structcheck @@ -30,7 +30,7 @@ type pipe struct { readerErr error } -// NewPipeFile returns a pipe that uses file-system to offload memory +// NewPipeFile returns a pipe that uses file-system to offload memory. 
func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) { tempfile, err := ioutil.TempFile(tempdir, "filepipe") if err != nil { @@ -50,7 +50,7 @@ func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) { return pipeReader{pipe}, pipeWriter{pipe}, nil } -// NewPipeMemory returns a pipe that uses an in-memory buffer +// NewPipeMemory returns a pipe that uses an in-memory buffer. func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) { pipe := &pipe{ buffer: make(memory, pipeSize), @@ -63,13 +63,13 @@ func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) { type pipeReader struct{ pipe *pipe } type pipeWriter struct{ pipe *pipe } -// Close implements io.Reader Close +// Close implements io.Reader Close. func (reader pipeReader) Close() error { return reader.CloseWithError(nil) } -// Close implements io.Writer Close +// Close implements io.Writer Close. func (writer pipeWriter) Close() error { return writer.CloseWithError(nil) } -// CloseWithError implements closing with error +// CloseWithError implements closing with error. func (reader pipeReader) CloseWithError(err error) error { if err == nil { err = io.ErrClosedPipe @@ -88,7 +88,7 @@ func (reader pipeReader) CloseWithError(err error) error { return pipe.buffer.Close() } -// CloseWithError implements closing with error +// CloseWithError implements closing with error. func (writer pipeWriter) CloseWithError(err error) error { if err == nil { err = io.EOF @@ -108,7 +108,7 @@ func (writer pipeWriter) CloseWithError(err error) error { return pipe.buffer.Close() } -// Write writes to the pipe returning io.ErrClosedPipe when pipeSize is reached +// Write writes to the pipe returning io.ErrClosedPipe when pipeSize is reached. func (writer pipeWriter) Write(data []byte) (n int, err error) { pipe := writer.pipe pipe.mu.Lock() @@ -161,7 +161,7 @@ func (writer pipeWriter) Write(data []byte) (n int, err error) { return writeAmount, err } -// Read reads from the pipe returning io.EOF when writer is closed or pipeSize is reached +// Read reads from the pipe returning io.EOF when writer is closed or pipeSize is reached. func (reader pipeReader) Read(data []byte) (n int, err error) { pipe := reader.pipe pipe.mu.Lock() @@ -214,13 +214,13 @@ func (reader pipeReader) Read(data []byte) (n int, err error) { return readAmount, err } -// MultiPipe is a multipipe backed by a single file +// MultiPipe is a multipipe backed by a single file. type MultiPipe struct { pipes []pipe } // NewMultiPipeFile returns a new MultiPipe that is created in tempdir -// if tempdir == "" the fill will be created it into os.TempDir +// if tempdir == "" the fill will be created it into os.TempDir. func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, error) { tempfile, err := ioutil.TempFile(tempdir, "multifilepipe") if err != nil { @@ -255,7 +255,7 @@ func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, er return multipipe, nil } -// NewMultiPipeMemory returns a new MultiPipe that is using a memory buffer +// NewMultiPipeMemory returns a new MultiPipe that is using a memory buffer. func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, error) { buffer := make(memory, pipeCount*pipeSize) @@ -273,7 +273,7 @@ func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, error) { return multipipe, nil } -// Pipe returns the two ends of a block stream pipe +// Pipe returns the two ends of a block stream pipe. 
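As a quick aside before the remaining sync2 hunks: the pipe constructors touched above (NewPipeFile, NewPipeMemory, MultiPipe) pair a PipeReader with a PipeWriter that can be closed with an error. Below is a minimal, hedged sketch of the in-memory variant, assuming the package is imported as storj.io/common/sync2; it is illustrative only, not part of the diff.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"storj.io/common/sync2"
)

func main() {
	// 1 KiB is the total capacity; per the comments above, Write returns
	// io.ErrClosedPipe once pipeSize bytes have been written.
	reader, writer, err := sync2.NewPipeMemory(1024)
	if err != nil {
		log.Fatal(err)
	}

	go func() {
		_, _ = writer.Write([]byte("hello pipe"))
		_ = writer.Close() // the reader sees io.EOF after the buffered bytes
	}()

	data, err := ioutil.ReadAll(reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
	_ = reader.Close()
}
```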
func (multipipe *MultiPipe) Pipe(index int) (PipeReader, PipeWriter) { pipe := &multipipe.pipes[index] return pipeReader{pipe}, pipeWriter{pipe} diff --git a/vendor/storj.io/common/sync2/semaphore.go b/vendor/storj.io/common/sync2/semaphore.go index bd684cdac..9b89b0cdb 100644 --- a/vendor/storj.io/common/sync2/semaphore.go +++ b/vendor/storj.io/common/sync2/semaphore.go @@ -9,7 +9,7 @@ import ( "golang.org/x/sync/semaphore" ) -// Semaphore implements a closable semaphore +// Semaphore implements a closable semaphore. type Semaphore struct { noCopy noCopy // nolint: structcheck diff --git a/vendor/storj.io/common/sync2/sleep.go b/vendor/storj.io/common/sync2/sleep.go index 586a05e99..532dce630 100644 --- a/vendor/storj.io/common/sync2/sleep.go +++ b/vendor/storj.io/common/sync2/sleep.go @@ -8,7 +8,7 @@ import ( "time" ) -// Sleep implements sleeping with cancellation +// Sleep implements sleeping with cancellation. func Sleep(ctx context.Context, duration time.Duration) bool { timer := time.NewTimer(duration) defer timer.Stop() diff --git a/vendor/storj.io/common/sync2/tee.go b/vendor/storj.io/common/sync2/tee.go index 2e333d26e..9cf9d6ff1 100644 --- a/vendor/storj.io/common/sync2/tee.go +++ b/vendor/storj.io/common/sync2/tee.go @@ -28,7 +28,7 @@ type tee struct { writerErr error } -// NewTeeFile returns a tee that uses file-system to offload memory +// NewTeeFile returns a tee that uses file-system to offload memory. func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) { file, err := tmpfile.New(tempdir, "tee") if err != nil { @@ -45,7 +45,7 @@ func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) { return newTee(buffer, readers, &handles) } -// NewTeeInmemory returns a tee that uses inmemory +// NewTeeInmemory returns a tee that uses inmemory. func NewTeeInmemory(readers int, allocMemory int64) ([]PipeReader, PipeWriter, error) { handles := int64(readers + 1) // +1 for the writer memory := memory(make([]byte, allocMemory)) @@ -124,7 +124,7 @@ func (reader *teeReader) Read(data []byte) (n int, err error) { return readAmount, err } -// Write writes to the buffer returning io.ErrClosedPipe when limit is reached +// Write writes to the buffer returning io.ErrClosedPipe when limit is reached. // // It will block until at least one reader require the data. func (writer *teeWriter) Write(data []byte) (n int, err error) { @@ -163,13 +163,13 @@ func (writer *teeWriter) Write(data []byte) (n int, err error) { return writeAmount, err } -// Close implements io.Reader Close +// Close implements io.Reader Close. func (reader *teeReader) Close() error { return reader.CloseWithError(nil) } -// Close implements io.Writer Close +// Close implements io.Writer Close. func (writer *teeWriter) Close() error { return writer.CloseWithError(nil) } -// CloseWithError implements closing with error +// CloseWithError implements closing with error. func (reader *teeReader) CloseWithError(reason error) (err error) { tee := reader.tee if atomic.CompareAndSwapInt32(&reader.closed, 0, 1) { @@ -183,7 +183,7 @@ func (reader *teeReader) CloseWithError(reason error) (err error) { return err } -// CloseWithError implements closing with error +// CloseWithError implements closing with error. 
func (writer *teeWriter) CloseWithError(reason error) error { if reason == nil { reason = io.EOF diff --git a/vendor/storj.io/common/sync2/throttle.go b/vendor/storj.io/common/sync2/throttle.go index 9c5d4143a..dd35b59c2 100644 --- a/vendor/storj.io/common/sync2/throttle.go +++ b/vendor/storj.io/common/sync2/throttle.go @@ -7,7 +7,7 @@ import ( "sync" ) -// Throttle implements two-sided throttling, between a consumer and producer +// Throttle implements two-sided throttling, between a consumer and producer. type Throttle struct { noCopy noCopy // nolint: structcheck @@ -24,7 +24,7 @@ type Throttle struct { available int64 } -// NewThrottle returns a new Throttle primitive +// NewThrottle returns a new Throttle primitive. func NewThrottle() *Throttle { var throttle Throttle throttle.consumer.L = &throttle.mu @@ -32,7 +32,7 @@ func NewThrottle() *Throttle { return &throttle } -// Consume subtracts amount from the throttle +// Consume subtracts amount from the throttle. func (throttle *Throttle) Consume(amount int64) error { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -41,7 +41,7 @@ func (throttle *Throttle) Consume(amount int64) error { return throttle.combinedError() } -// ConsumeOrWait tries to consume at most maxAmount +// ConsumeOrWait tries to consume at most maxAmount. func (throttle *Throttle) ConsumeOrWait(maxAmount int64) (int64, error) { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -60,7 +60,7 @@ func (throttle *Throttle) ConsumeOrWait(maxAmount int64) (int64, error) { return available, throttle.combinedError() } -// WaitUntilAbove waits until availability drops below limit +// WaitUntilAbove waits until availability drops below limit. func (throttle *Throttle) WaitUntilAbove(limit int64) error { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -70,7 +70,7 @@ func (throttle *Throttle) WaitUntilAbove(limit int64) error { return throttle.combinedError() } -// Produce adds amount to the throttle +// Produce adds amount to the throttle. func (throttle *Throttle) Produce(amount int64) error { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -79,7 +79,7 @@ func (throttle *Throttle) Produce(amount int64) error { return throttle.combinedError() } -// ProduceAndWaitUntilBelow adds amount to the throttle and waits until it's below the given threshold +// ProduceAndWaitUntilBelow adds amount to the throttle and waits until it's below the given threshold. func (throttle *Throttle) ProduceAndWaitUntilBelow(amount, limit int64) error { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -91,7 +91,7 @@ func (throttle *Throttle) ProduceAndWaitUntilBelow(amount, limit int64) error { return throttle.combinedError() } -// WaitUntilBelow waits until availability drops below limit +// WaitUntilBelow waits until availability drops below limit. func (throttle *Throttle) WaitUntilBelow(limit int64) error { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -101,7 +101,7 @@ func (throttle *Throttle) WaitUntilBelow(limit int64) error { return throttle.combinedError() } -// Fail stops both consumer and allocator +// Fail stops both consumer and allocator. func (throttle *Throttle) Fail(err error) { throttle.mu.Lock() defer throttle.mu.Unlock() @@ -111,7 +111,7 @@ func (throttle *Throttle) Fail(err error) { throttle.producer.Signal() } -// must hold mutex when calling this +// must hold mutex when calling this. 
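Before the rest of the throttle.go hunk: the Throttle documented above coordinates a producer that adds allowance and a consumer that drains it. The sketch below is a hedged illustration of that hand-off; the loop structure and the use of io.EOF to signal completion via Fail are assumptions, not code from this change.

```go
package main

import (
	"fmt"
	"io"

	"storj.io/common/sync2"
)

func main() {
	throttle := sync2.NewThrottle()

	go func() {
		// Producer side: announce available allowance in 1 KiB steps.
		for i := 0; i < 4; i++ {
			if err := throttle.Produce(1024); err != nil {
				return // the consumer failed the throttle
			}
		}
		throttle.Fail(io.EOF) // stop both sides once production is done
	}()

	var total int64
	for {
		// Consumer side: take at most 512 bytes, waiting if none is available.
		n, err := throttle.ConsumeOrWait(512)
		total += n
		if err != nil {
			fmt.Println("consumed", total, "bytes, stopped with:", err)
			return
		}
	}
}
```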
func (throttle *Throttle) alive() bool { return len(throttle.errs) == 0 } func (throttle *Throttle) combinedError() error { @@ -122,7 +122,7 @@ func (throttle *Throttle) combinedError() error { return throttle.errs[0] } -// Err returns the finishing error +// Err returns the finishing error. func (throttle *Throttle) Err() error { throttle.mu.Lock() defer throttle.mu.Unlock() diff --git a/vendor/storj.io/common/sync2/workgroup.go b/vendor/storj.io/common/sync2/workgroup.go index 8507a7740..19c1a27c6 100644 --- a/vendor/storj.io/common/sync2/workgroup.go +++ b/vendor/storj.io/common/sync2/workgroup.go @@ -7,7 +7,7 @@ import ( "sync" ) -// WorkGroup implements waitable and closable group of workers +// WorkGroup implements waitable and closable group of workers. type WorkGroup struct { noCopy noCopy // nolint: structcheck @@ -19,7 +19,7 @@ type WorkGroup struct { workers int } -// init initializes work group +// init initializes work group. func (group *WorkGroup) init() { if !group.initialized { group.cond.L = &group.mu @@ -39,7 +39,7 @@ func (group *WorkGroup) Go(fn func()) bool { return true } -// Start returns true when work can be started +// Start returns true when work can be started. func (group *WorkGroup) Start() bool { group.mu.Lock() defer group.mu.Unlock() @@ -52,7 +52,7 @@ func (group *WorkGroup) Start() bool { return true } -// Done finishes a pending work item +// Done finishes a pending work item. func (group *WorkGroup) Done() { group.mu.Lock() defer group.mu.Unlock() diff --git a/vendor/storj.io/drpc/README.md b/vendor/storj.io/drpc/README.md index 79d3a28a4..10a32c410 100644 --- a/vendor/storj.io/drpc/README.md +++ b/vendor/storj.io/drpc/README.md @@ -22,6 +22,9 @@ type Conn interface { // Close closes the connection. Close() error + // Closed returns true if the connection is definitely closed. + Closed() bool + // Transport returns the transport the connection is using. Transport() Transport diff --git a/vendor/storj.io/drpc/drpc.go b/vendor/storj.io/drpc/drpc.go index d3e7ca81b..8618d252b 100644 --- a/vendor/storj.io/drpc/drpc.go +++ b/vendor/storj.io/drpc/drpc.go @@ -37,6 +37,9 @@ type Conn interface { // Close closes the connection. Close() error + // Closed returns true if the connection is definitely closed. + Closed() bool + // Transport returns the transport the connection is using. Transport() Transport diff --git a/vendor/storj.io/drpc/drpccache/README.md b/vendor/storj.io/drpc/drpccache/README.md new file mode 100644 index 000000000..5739abcc3 --- /dev/null +++ b/vendor/storj.io/drpc/drpccache/README.md @@ -0,0 +1,74 @@ +# package drpccache + +`import "storj.io/drpc/drpccache"` + +Package drpccache implements per stream cache for drpc. + +## Usage + +#### func WithContext + +```go +func WithContext(parent context.Context, cache *Cache) context.Context +``` +WithContext returns a context with the value cache associated with the context. + +#### type Cache + +```go +type Cache struct { +} +``` + +Cache is a per stream cache. + +#### func FromContext + +```go +func FromContext(ctx context.Context) *Cache +``` +FromContext returns a cache from a context. + +Example usage: + + cache := drpccache.FromContext(stream.Context()) + if cache != nil { + value := cache.LoadOrCreate("initialized", func() (interface{}) { + return 42 + }) + } + +#### func New + +```go +func New() *Cache +``` +New returns a new cache. + +#### func (*Cache) Clear + +```go +func (cache *Cache) Clear() +``` +Clear clears the cache. 
+ +#### func (*Cache) Load + +```go +func (cache *Cache) Load(key interface{}) interface{} +``` +Load returns the value with the given key. + +#### func (*Cache) LoadOrCreate + +```go +func (cache *Cache) LoadOrCreate(key interface{}, fn func() interface{}) interface{} +``` +LoadOrCreate returns the value with the given key. + +#### func (*Cache) Store + +```go +func (cache *Cache) Store(key, value interface{}) +``` +Store sets the value at a key. diff --git a/vendor/storj.io/drpc/drpccache/cache.go b/vendor/storj.io/drpc/drpccache/cache.go new file mode 100644 index 000000000..040598335 --- /dev/null +++ b/vendor/storj.io/drpc/drpccache/cache.go @@ -0,0 +1,101 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpccache + +import ( + "context" + "sync" +) + +type cacheKey struct{} + +// Cache is a per stream cache. +type Cache struct { + mu sync.Mutex + values map[interface{}]interface{} +} + +// New returns a new cache. +func New() *Cache { return &Cache{} } + +// FromContext returns a cache from a context. +// +// Example usage: +// +// cache := drpccache.FromContext(stream.Context()) +// if cache != nil { +// value := cache.LoadOrCreate("initialized", func() (interface{}) { +// return 42 +// }) +// } +func FromContext(ctx context.Context) *Cache { + value := ctx.Value(cacheKey{}) + if value == nil { + return nil + } + + cache, ok := value.(*Cache) + if !ok { + return nil + } + + return cache +} + +// WithContext returns a context with the value cache associated with the context. +func WithContext(parent context.Context, cache *Cache) context.Context { + return context.WithValue(parent, cacheKey{}, cache) +} + +// init ensures that the values map exist. +func (cache *Cache) init() { + if cache.values == nil { + cache.values = map[interface{}]interface{}{} + } +} + +// Clear clears the cache. +func (cache *Cache) Clear() { + cache.mu.Lock() + defer cache.mu.Unlock() + + cache.values = nil +} + +// Store sets the value at a key. +func (cache *Cache) Store(key, value interface{}) { + cache.mu.Lock() + defer cache.mu.Unlock() + + cache.init() + cache.values[key] = value +} + +// Load returns the value with the given key. +func (cache *Cache) Load(key interface{}) interface{} { + cache.mu.Lock() + defer cache.mu.Unlock() + + if cache.values == nil { + return nil + } + + return cache.values[key] +} + +// LoadOrCreate returns the value with the given key. +func (cache *Cache) LoadOrCreate(key interface{}, fn func() interface{}) interface{} { + cache.mu.Lock() + defer cache.mu.Unlock() + + cache.init() + + value, ok := cache.values[key] + if !ok { + value = fn() + cache.values[key] = value + } + + return value +} diff --git a/vendor/storj.io/drpc/drpccache/doc.go b/vendor/storj.io/drpc/drpccache/doc.go new file mode 100644 index 000000000..6926e5d4e --- /dev/null +++ b/vendor/storj.io/drpc/drpccache/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpccache implements per stream cache for drpc. 
+package drpccache diff --git a/vendor/storj.io/drpc/drpcmanager/manager.go b/vendor/storj.io/drpc/drpcmanager/manager.go index 9aecb72fa..ce96826b2 100644 --- a/vendor/storj.io/drpc/drpcmanager/manager.go +++ b/vendor/storj.io/drpc/drpcmanager/manager.go @@ -11,6 +11,7 @@ import ( "github.com/zeebo/errs" "storj.io/drpc" + "storj.io/drpc/drpccache" "storj.io/drpc/drpcctx" "storj.io/drpc/drpcdebug" "storj.io/drpc/drpcmetadata" @@ -170,6 +171,8 @@ func (m *Manager) NewServerStream(ctx context.Context) (stream *drpcstream.Strea return nil, "", err } + callerCache := drpccache.FromContext(ctx) + var metadata drpcwire.Packet for { @@ -192,6 +195,11 @@ func (m *Manager) NewServerStream(ctx context.Context) (stream *drpcstream.Strea case drpcwire.KindInvoke: streamCtx := m.ctx + + if callerCache != nil { + streamCtx = drpccache.WithContext(streamCtx, callerCache) + } + if metadata.ID.Stream == pkt.ID.Stream { md, err := drpcmetadata.Decode(metadata.Data) if err != nil { @@ -271,13 +279,6 @@ func (m *Manager) manageStream(ctx context.Context, stream *drpcstream.Stream) { go m.manageStreamContext(ctx, wg, stream) wg.Wait() - // always ensure the stream is terminated if we're done managing it. the - // stream should already be in a terminal state unless we're exiting due - // to the manager terminating. that only happens if the underlying transport - // died, so just assume the remote end issued a cancel by terminating - // the transport. - stream.Cancel(context.Canceled) - // release semaphore <-m.sem } @@ -294,6 +295,7 @@ func (m *Manager) manageStreamPackets(wg *sync.WaitGroup, stream *drpcstream.Str for { select { case <-m.term.Signal(): + stream.Cancel(context.Canceled) return case <-stream.Terminated(): @@ -321,6 +323,7 @@ func (m *Manager) manageStreamContext(ctx context.Context, wg *sync.WaitGroup, s select { case <-m.term.Signal(): + stream.Cancel(context.Canceled) return case <-stream.Terminated(): diff --git a/vendor/storj.io/drpc/drpcmetadata/README.md b/vendor/storj.io/drpc/drpcmetadata/README.md index 70cfcefff..8e7632559 100644 --- a/vendor/storj.io/drpc/drpcmetadata/README.md +++ b/vendor/storj.io/drpc/drpcmetadata/README.md @@ -17,21 +17,28 @@ Add associates a key/value pair on the context. #### func AddPairs ```go -func AddPairs(ctx context.Context, md map[string]string) context.Context +func AddPairs(ctx context.Context, metadata map[string]string) context.Context ``` AddPairs attaches metadata onto a context and return the context. #### func Decode ```go -func Decode(data []byte) (*invoke.InvokeMetadata, error) +func Decode(data []byte) (map[string]string, error) ``` -Decode translate byte form of metadata into metadata struct defined by protobuf. +Decode translate byte form of metadata into key/value metadata. #### func Encode ```go -func Encode(buffer []byte) ([]byte, error) +func Encode(buffer []byte, metadata map[string]string) ([]byte, error) ``` Encode generates byte form of the metadata and appends it onto the passed in buffer. + +#### func Get + +```go +func Get(ctx context.Context) (map[string]string, bool) +``` +Get returns all key/value pairs on the given context. 
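The drpcmetadata README above now documents the map-based API (AddPairs, Get, Encode, Decode). A minimal caller-side sketch of that round trip follows, assuming the documented signatures; the key and value are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"storj.io/drpc/drpcmetadata"
)

func main() {
	// Attach key/value pairs to an outgoing context.
	ctx := drpcmetadata.AddPairs(context.Background(), map[string]string{
		"request-id": "abc123",
	})

	// Read them back; ok reports whether any metadata is present.
	md, ok := drpcmetadata.Get(ctx)
	fmt.Println(ok, md["request-id"])

	// Encode appends the serialized metadata to the passed buffer;
	// Decode reverses it back into a plain map.
	buf, err := drpcmetadata.Encode(nil, md)
	if err != nil {
		panic(err)
	}
	decoded, err := drpcmetadata.Decode(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded["request-id"])
}
```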
diff --git a/vendor/storj.io/drpc/drpcmetadata/invoke/README.md b/vendor/storj.io/drpc/drpcmetadata/invoke/README.md index e416ee5d3..9ce369862 100644 --- a/vendor/storj.io/drpc/drpcmetadata/invoke/README.md +++ b/vendor/storj.io/drpc/drpcmetadata/invoke/README.md @@ -1,17 +1,17 @@ # package invoke -`import "storj.io/drpc/drpcmetadata/invoke"` +`import "storj.io/drpc/drpcmetadata/invoke"` Package invoke defines the proto messages exposed by drpc for sending metadata across the wire. ## Usage -#### type InvokeMetadata +#### type Metadata ```go -type InvokeMetadata struct { - Data map[string]string `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +type Metadata struct { + Data map[string]string `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -19,62 +19,62 @@ type InvokeMetadata struct { ``` -#### func (*InvokeMetadata) Descriptor +#### func (*Metadata) Descriptor ```go -func (*InvokeMetadata) Descriptor() ([]byte, []int) +func (*Metadata) Descriptor() ([]byte, []int) ``` -#### func (*InvokeMetadata) GetData +#### func (*Metadata) GetData ```go -func (m *InvokeMetadata) GetData() map[string]string +func (m *Metadata) GetData() map[string]string ``` -#### func (*InvokeMetadata) ProtoMessage +#### func (*Metadata) ProtoMessage ```go -func (*InvokeMetadata) ProtoMessage() +func (*Metadata) ProtoMessage() ``` -#### func (*InvokeMetadata) Reset +#### func (*Metadata) Reset ```go -func (m *InvokeMetadata) Reset() +func (m *Metadata) Reset() ``` -#### func (*InvokeMetadata) String +#### func (*Metadata) String ```go -func (m *InvokeMetadata) String() string +func (m *Metadata) String() string ``` -#### func (*InvokeMetadata) XXX_DiscardUnknown +#### func (*Metadata) XXX_DiscardUnknown ```go -func (m *InvokeMetadata) XXX_DiscardUnknown() +func (m *Metadata) XXX_DiscardUnknown() ``` -#### func (*InvokeMetadata) XXX_Marshal +#### func (*Metadata) XXX_Marshal ```go -func (m *InvokeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) ``` -#### func (*InvokeMetadata) XXX_Merge +#### func (*Metadata) XXX_Merge ```go -func (m *InvokeMetadata) XXX_Merge(src proto.Message) +func (m *Metadata) XXX_Merge(src proto.Message) ``` -#### func (*InvokeMetadata) XXX_Size +#### func (*Metadata) XXX_Size ```go -func (m *InvokeMetadata) XXX_Size() int +func (m *Metadata) XXX_Size() int ``` -#### func (*InvokeMetadata) XXX_Unmarshal +#### func (*Metadata) XXX_Unmarshal ```go -func (m *InvokeMetadata) XXX_Unmarshal(b []byte) error +func (m *Metadata) XXX_Unmarshal(b []byte) error ``` diff --git a/vendor/storj.io/drpc/drpcmetadata/metadata.go b/vendor/storj.io/drpc/drpcmetadata/metadata.go index d3e8a12e7..3cf6aed30 100644 --- a/vendor/storj.io/drpc/drpcmetadata/metadata.go +++ b/vendor/storj.io/drpc/drpcmetadata/metadata.go @@ -12,51 +12,30 @@ import ( ) // AddPairs attaches metadata onto a context and return the context. 
-func AddPairs(ctx context.Context, md map[string]string) context.Context { - if len(md) < 1 { - return ctx - } - - for key, val := range md { +func AddPairs(ctx context.Context, metadata map[string]string) context.Context { + for key, val := range metadata { ctx = Add(ctx, key, val) } - return ctx } // Encode generates byte form of the metadata and appends it onto the passed in buffer. -func Encode(buffer []byte, md map[string]string) ([]byte, error) { - if len(md) < 1 { - return buffer, nil - } - - msg := invoke.Metadata{ - Data: md, - } - - msgBytes, err := proto.Marshal(&msg) +func Encode(buffer []byte, metadata map[string]string) ([]byte, error) { + data, err := proto.Marshal(&invoke.Metadata{Data: metadata}) if err != nil { return buffer, err } - - buffer = append(buffer, msgBytes...) - - return buffer, nil + return append(buffer, data...), nil } // Decode translate byte form of metadata into key/value metadata. func Decode(data []byte) (map[string]string, error) { - if len(data) < 1 { - return map[string]string{}, nil - } - - msg := invoke.Metadata{} - err := proto.Unmarshal(data, &msg) + var md invoke.Metadata + err := proto.Unmarshal(data, &md) if err != nil { return nil, err } - - return msg.Data, nil + return md.Data, nil } type metadataKey struct{} diff --git a/vendor/storj.io/drpc/go.mod b/vendor/storj.io/drpc/go.mod index 4c560eb0f..c37f759e3 100644 --- a/vendor/storj.io/drpc/go.mod +++ b/vendor/storj.io/drpc/go.mod @@ -1,6 +1,6 @@ module storj.io/drpc -go 1.14 +go 1.13 require ( github.com/gogo/protobuf v1.2.1 diff --git a/vendor/storj.io/uplink/access.go b/vendor/storj.io/uplink/access.go index a238d3f92..5a6a1e892 100644 --- a/vendor/storj.io/uplink/access.go +++ b/vendor/storj.io/uplink/access.go @@ -181,7 +181,7 @@ func requestAccessWithPassphraseAndConcurrency(ctx context.Context, config Confi info, err := metainfo.GetProjectInfo(ctx) if err != nil { - return nil, convertKnownErrors(err, "") + return nil, convertKnownErrors(err, "", "") } key, err := encryption.DeriveRootKey([]byte(passphrase), info.ProjectSalt, "", concurrency) diff --git a/vendor/storj.io/uplink/bucket.go b/vendor/storj.io/uplink/bucket.go index 00789ca78..02c9fc07e 100644 --- a/vendor/storj.io/uplink/bucket.go +++ b/vendor/storj.io/uplink/bucket.go @@ -36,16 +36,11 @@ type Bucket struct { // StatBucket returns information about a bucket. func (project *Project) StatBucket(ctx context.Context, bucket string) (info *Bucket, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) b, err := project.project.GetBucket(ctx, bucket) if err != nil { - if storj.ErrNoBucket.Has(err) { - return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) - } else if storj.ErrBucketNotFound.Has(err) { - return nil, errwrapf("%w (%q)", ErrBucketNotFound, bucket) - } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, "") } return &Bucket{ @@ -58,7 +53,7 @@ func (project *Project) StatBucket(ctx context.Context, bucket string) (info *Bu // // When bucket already exists it returns a valid Bucket and ErrBucketExists. 
func (project *Project) CreateBucket(ctx context.Context, bucket string) (created *Bucket, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) // TODO remove bucket configuration when proper fix will be deployed on satellite b, err := project.project.CreateBucket(ctx, bucket, &storj.Bucket{ @@ -81,11 +76,11 @@ func (project *Project) CreateBucket(ctx context.Context, bucket string) (create // TODO: Ideally, the satellite should return the existing bucket when this error occurs. existing, err := project.StatBucket(ctx, bucket) if err != nil { - return existing, errs.Combine(errwrapf("%w (%q)", ErrBucketAlreadyExists, bucket), convertKnownErrors(err, bucket)) + return existing, errs.Combine(errwrapf("%w (%q)", ErrBucketAlreadyExists, bucket), convertKnownErrors(err, bucket, "")) } return existing, errwrapf("%w (%q)", ErrBucketAlreadyExists, bucket) } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, "") } return &Bucket{ @@ -98,11 +93,11 @@ func (project *Project) CreateBucket(ctx context.Context, bucket string) (create // // When bucket already exists it returns a valid Bucket and no error. func (project *Project) EnsureBucket(ctx context.Context, bucket string) (ensured *Bucket, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) ensured, err = project.CreateBucket(ctx, bucket) if err != nil && !errors.Is(err, ErrBucketAlreadyExists) { - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, "") } return ensured, nil @@ -112,18 +107,14 @@ func (project *Project) EnsureBucket(ctx context.Context, bucket string) (ensure // // When bucket is not empty it returns ErrBucketNotEmpty. func (project *Project) DeleteBucket(ctx context.Context, bucket string) (deleted *Bucket, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) existing, err := project.project.DeleteBucket(ctx, bucket) if err != nil { if errs2.IsRPC(err, rpcstatus.FailedPrecondition) { return nil, errwrapf("%w (%q)", ErrBucketNotEmpty, bucket) - } else if storj.ErrBucketNotFound.Has(err) { - return nil, errwrapf("%w (%q)", ErrBucketNotFound, bucket) - } else if storj.ErrNoBucket.Has(err) { - return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, "") } if existing == (storj.Bucket{}) { diff --git a/vendor/storj.io/uplink/buckets.go b/vendor/storj.io/uplink/buckets.go index 181ee2b08..bc372d243 100644 --- a/vendor/storj.io/uplink/buckets.go +++ b/vendor/storj.io/uplink/buckets.go @@ -17,7 +17,7 @@ type ListBucketsOptions struct { // ListBuckets returns an iterator over the buckets. 
func (project *Project) ListBuckets(ctx context.Context, options *ListBucketsOptions) *BucketIterator { - defer mon.Func().ResetTrace(&ctx)(nil) + defer mon.Func().RestartTrace(&ctx)(nil) opts := storj.BucketListOptions{ Direction: storj.After, @@ -79,7 +79,7 @@ func (buckets *BucketIterator) Next() bool { func (buckets *BucketIterator) loadNext() bool { list, err := buckets.project.db.ListBuckets(buckets.ctx, buckets.options) if err != nil { - buckets.err = err + buckets.err = convertKnownErrors(err, "", "") return false } buckets.list = &list diff --git a/vendor/storj.io/uplink/common.go b/vendor/storj.io/uplink/common.go index 701737054..6ab1ed8c6 100644 --- a/vendor/storj.io/uplink/common.go +++ b/vendor/storj.io/uplink/common.go @@ -27,8 +27,17 @@ var ErrTooManyRequests = errors.New("too many requests") // ErrBandwidthLimitExceeded is returned when project will exceeded bandwidth limit. var ErrBandwidthLimitExceeded = errors.New("bandwidth limit exceeded") -func convertKnownErrors(err error, bucket string) error { - if errs2.IsRPC(err, rpcstatus.ResourceExhausted) { +func convertKnownErrors(err error, bucket, key string) error { + switch { + case storj.ErrNoBucket.Has(err): + return errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + case storj.ErrNoPath.Has(err): + return errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + case storj.ErrBucketNotFound.Has(err): + return errwrapf("%w (%q)", ErrBucketNotFound, bucket) + case storj.ErrObjectNotFound.Has(err): + return errwrapf("%w (%q)", ErrObjectNotFound, key) + case errs2.IsRPC(err, rpcstatus.ResourceExhausted): // TODO is a better way to do this? message := errs.Unwrap(err).Error() if message == "Exceeded Usage Limit" { @@ -36,12 +45,12 @@ func convertKnownErrors(err error, bucket string) error { } else if message == "Too Many Requests" { return packageError.Wrap(ErrTooManyRequests) } - } else if errs2.IsRPC(err, rpcstatus.NotFound) { + case errs2.IsRPC(err, rpcstatus.NotFound): message := errs.Unwrap(err).Error() if strings.HasPrefix(message, storj.ErrBucketNotFound.New("").Error()) { return errwrapf("%w (%q)", ErrBucketNotFound, bucket) } else if strings.HasPrefix(message, storj.ErrObjectNotFound.New("").Error()) { - return packageError.Wrap(ErrObjectNotFound) + return errwrapf("%w (%q)", ErrObjectNotFound, key) } } diff --git a/vendor/storj.io/uplink/doc.go b/vendor/storj.io/uplink/doc.go index 30bff47ec..f7e343e17 100644 --- a/vendor/storj.io/uplink/doc.go +++ b/vendor/storj.io/uplink/doc.go @@ -89,7 +89,7 @@ items. return err } -Objects +Download Object Objects support a couple kilobytes of arbitrary key/value metadata, and arbitrary-size primary data streams with the ability to read at arbitrary offsets. @@ -116,5 +116,17 @@ If you want to access only a small subrange of the data you uploaded, you can us _, err = io.Copy(w, object) return err +List Objects + +Listing objects returns an iterator that allows to walk through all the items: + + objects := project.ListObjects(ctx, "logs", nil) + for objects.Next() { + item := objects.Item() + fmt.Println(item.IsPrefix, item.Key) + } + if err := objects.Err(); err != nil { + return err + } */ package uplink diff --git a/vendor/storj.io/uplink/download.go b/vendor/storj.io/uplink/download.go index 8750010e5..3491a575b 100644 --- a/vendor/storj.io/uplink/download.go +++ b/vendor/storj.io/uplink/download.go @@ -19,7 +19,7 @@ type DownloadOptions struct { // DownloadObject starts a download from the specific key. 
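Since convertKnownErrors now receives both the bucket and the key and wraps the exported sentinel errors with %w, callers can branch with errors.Is instead of string matching. A hedged sketch follows, ahead of the updated DownloadObject body below; the function name, bucket, and key are illustrative, and project is assumed to be an already-opened *uplink.Project.

```go
package example

import (
	"context"
	"errors"
	"io"
	"os"

	"storj.io/uplink"
)

// downloadIfPresent copies the object to stdout, treating a missing bucket
// or missing key as a no-op rather than a failure.
func downloadIfPresent(ctx context.Context, project *uplink.Project, bucket, key string) error {
	download, err := project.DownloadObject(ctx, bucket, key, nil)
	switch {
	case errors.Is(err, uplink.ErrBucketNotFound):
		return nil // the bucket itself does not exist
	case errors.Is(err, uplink.ErrObjectNotFound):
		return nil // the bucket exists but the key does not
	case err != nil:
		return err
	}
	defer func() { _ = download.Close() }()

	_, err = io.Copy(os.Stdout, download)
	return err
}
```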
func (project *Project) DownloadObject(ctx context.Context, bucket, key string, options *DownloadOptions) (download *Download, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) if bucket == "" { return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) @@ -39,12 +39,7 @@ func (project *Project) DownloadObject(ctx context.Context, bucket, key string, obj, err := project.db.GetObject(ctx, b, key) if err != nil { - if storj.ErrNoPath.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) - } else if storj.ErrObjectNotFound.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) - } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, key) } objectStream, err := project.db.GetObjectStream(ctx, b, obj) @@ -71,8 +66,8 @@ func (download *Download) Info() *Object { // Read downloads up to len(p) bytes into p from the object's data stream. // It returns the number of bytes read (0 <= n <= len(p)) and any error encountered. -func (download *Download) Read(data []byte) (n int, err error) { - return download.download.Read(data) +func (download *Download) Read(p []byte) (n int, err error) { + return download.download.Read(p) } // Close closes the reader of the download. diff --git a/vendor/storj.io/uplink/encryption.go b/vendor/storj.io/uplink/encryption.go index 3dd9ce7c8..502dae948 100644 --- a/vendor/storj.io/uplink/encryption.go +++ b/vendor/storj.io/uplink/encryption.go @@ -17,7 +17,7 @@ type encryptionAccess struct { store *encryption.Store } -// newEncryptionAccess creates an encryption access context +// newEncryptionAccess creates an encryption access context. func newEncryptionAccess() *encryptionAccess { store := encryption.NewStore() return &encryptionAccess{store: store} @@ -25,7 +25,7 @@ func newEncryptionAccess() *encryptionAccess { // newEncryptionAccessWithDefaultKey creates an encryption access context with // a default key set. -// Use (*Project).SaltedKeyFromPassphrase to generate a default key +// Use (*Project).SaltedKeyFromPassphrase to generate a default key. func newEncryptionAccessWithDefaultKey(defaultKey *storj.Key) *encryptionAccess { ec := newEncryptionAccess() ec.setDefaultKey(defaultKey) @@ -38,7 +38,7 @@ func (s *encryptionAccess) Store() *encryption.Store { } // setDefaultKey sets the default key for the encryption access context. -// Use (*Project).SaltedKeyFromPassphrase to generate a default key +// Use (*Project).SaltedKeyFromPassphrase to generate a default key. 
func (s *encryptionAccess) setDefaultKey(defaultKey *storj.Key) { s.store.SetDefaultKey(defaultKey) } diff --git a/vendor/storj.io/uplink/go.mod b/vendor/storj.io/uplink/go.mod index 5457898c5..3b620d973 100644 --- a/vendor/storj.io/uplink/go.mod +++ b/vendor/storj.io/uplink/go.mod @@ -5,11 +5,11 @@ go 1.13 require ( github.com/btcsuite/btcutil v1.0.1 github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 // indirect - github.com/spacemonkeygo/monkit/v3 v3.0.6 + github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 github.com/stretchr/testify v1.4.0 github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b github.com/zeebo/errs v1.2.2 go.uber.org/zap v1.10.0 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - storj.io/common v0.0.0-20200429074521-4ba140e4b747 + storj.io/common v0.0.0-20200519171747-3ff8acf78c46 ) diff --git a/vendor/storj.io/uplink/go.sum b/vendor/storj.io/uplink/go.sum index f7ccbff1e..ef208d087 100644 --- a/vendor/storj.io/uplink/go.sum +++ b/vendor/storj.io/uplink/go.sum @@ -1,6 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -14,25 +11,14 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk= github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= @@ -48,7 +34,6 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 h1:xHQewZjohU9/wUsyC99navCjQDNHtTgUOM/J1jAbzfw= github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1/go.mod h1:7NL9UAYQnRM5iKHUCld3tf02fKb5Dft+41+VckASUy0= github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= @@ -56,8 +41,8 @@ github.com/spacemonkeygo/monkit/v3 v3.0.4 h1:Ay+PZirv+qfd4sqcT+X/U3BnC7AcIaqp/IX github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= github.com/spacemonkeygo/monkit/v3 v3.0.5 h1:vMW8Ne6WAUU/OMYaSv7KGW9h/sRNgeh6TyBapOSuMhM= github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.6 h1:BKPrEaLokVAxlwHkD7jawViBa/IU9/bgXbZLWgjbdSM= -github.com/spacemonkeygo/monkit/v3 v3.0.6/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= +github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI= +github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA= github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= @@ -91,25 +76,13 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -117,22 +90,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf h1:9cZxTVBvFZgOnVi/DobY3JsafbPFPnP2rtN81d4wPpw= golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -140,15 +98,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -storj.io/common v0.0.0-20200422101411-db8726ab073a h1:gKPJnNWeBL49/NNOglQL2bLt/JyMvSAow8gh5e+aDGk= -storj.io/common v0.0.0-20200422101411-db8726ab073a/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= -storj.io/common v0.0.0-20200423123959-c1b3f92807ea h1:TV7/Do6oYxdVOnp43nPyFmxxKHbWobjDc6k42zXvrx0= -storj.io/common v0.0.0-20200423123959-c1b3f92807ea/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= -storj.io/common v0.0.0-20200424175742-65ac59022f4f h1:HPRWr2HQzPD12vhHIaYhh9HT0vYlULkqA453YEV/BXU= -storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= -storj.io/common v0.0.0-20200429074521-4ba140e4b747 h1:Ne1x0M80uNyN6tHIs15CGJqHbreKbvH5BOq4jdWsqMc= -storj.io/common v0.0.0-20200429074521-4ba140e4b747/go.mod h1:lfsaMdtHwrUOtSYkw73meCyZMUYiaFBKVqx6zgeSz2o= -storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw= -storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= +storj.io/common v0.0.0-20200519171747-3ff8acf78c46 h1:Yx73D928PKtyQYPXHuQ5WFES4t+0nufxbhwyf8VodMw= +storj.io/common v0.0.0-20200519171747-3ff8acf78c46/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ= +storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA= +storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= diff --git a/vendor/storj.io/uplink/internal/expose/exposed.go b/vendor/storj.io/uplink/internal/expose/exposed.go index 46a6e894c..d13edfc08 100644 --- a/vendor/storj.io/uplink/internal/expose/exposed.go +++ b/vendor/storj.io/uplink/internal/expose/exposed.go @@ -3,12 +3,12 @@ package expose -// RequestAccessWithPassphraseAndConcurrency exposes uplink.requestAccessWithPassphraseAndConcurrency +// RequestAccessWithPassphraseAndConcurrency exposes uplink.requestAccessWithPassphraseAndConcurrency. // -// func RequestAccessWithPassphraseAndConcurrency(ctx context.Context, config uplink.Config, satelliteNodeURL, apiKey, passphrase string, concurrency uint8) (_ *uplink.Access, err error) +// func RequestAccessWithPassphraseAndConcurrency(ctx context.Context, config uplink.Config, satelliteNodeURL, apiKey, passphrase string, concurrency uint8) (_ *uplink.Access, err error). var RequestAccessWithPassphraseAndConcurrency interface{} -// EnablePathEncryptionBypass exposes uplink.enablePathEncryptionBypass +// EnablePathEncryptionBypass exposes uplink.enablePathEncryptionBypass. 
// -// func EnablePathEncryptionBypass(access *Access) error +// func EnablePathEncryptionBypass(access *Access) error. var EnablePathEncryptionBypass interface{} diff --git a/vendor/storj.io/uplink/object.go b/vendor/storj.io/uplink/object.go index d8dcdc67f..99c145898 100644 --- a/vendor/storj.io/uplink/object.go +++ b/vendor/storj.io/uplink/object.go @@ -80,17 +80,12 @@ func (meta CustomMetadata) Verify() error { // StatObject returns information about an object at the specific key. func (project *Project) StatObject(ctx context.Context, bucket, key string) (info *Object, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) b := storj.Bucket{Name: bucket} obj, err := project.db.GetObject(ctx, b, key) if err != nil { - if storj.ErrNoPath.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) - } else if storj.ErrObjectNotFound.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) - } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, key) } return convertObject(&obj), nil @@ -98,17 +93,12 @@ func (project *Project) StatObject(ctx context.Context, bucket, key string) (inf // DeleteObject deletes the object at the specific key. func (project *Project) DeleteObject(ctx context.Context, bucket, key string) (deleted *Object, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) b := storj.Bucket{Name: bucket} obj, err := project.db.DeleteObject(ctx, b, key) if err != nil { - if storj.ErrNoPath.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) - } else if storj.ErrObjectNotFound.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) - } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, key) } return convertObject(&obj), nil } diff --git a/vendor/storj.io/uplink/objects.go b/vendor/storj.io/uplink/objects.go index cfae127d3..c42785e0c 100644 --- a/vendor/storj.io/uplink/objects.go +++ b/vendor/storj.io/uplink/objects.go @@ -26,7 +26,7 @@ type ListObjectsOptions struct { // ListObjects returns an iterator over the objects. func (project *Project) ListObjects(ctx context.Context, bucket string, options *ListObjectsOptions) *ObjectIterator { - defer mon.Func().ResetTrace(&ctx)(nil) + defer mon.Func().RestartTrace(&ctx)(nil) b := storj.Bucket{Name: bucket, PathCipher: storj.EncAESGCM} opts := storj.ListOptions{ @@ -98,7 +98,7 @@ func (objects *ObjectIterator) Next() bool { func (objects *ObjectIterator) loadNext() bool { list, err := objects.project.db.ListObjectsExtended(objects.ctx, objects.bucket, objects.options) if err != nil { - objects.err = convertKnownErrors(err, objects.bucket.Name) + objects.err = convertKnownErrors(err, objects.bucket.Name, "") return false } objects.list = &list diff --git a/vendor/storj.io/uplink/private/ecclient/client.go b/vendor/storj.io/uplink/private/ecclient/client.go index 870692dc9..57d3959c0 100644 --- a/vendor/storj.io/uplink/private/ecclient/client.go +++ b/vendor/storj.io/uplink/private/ecclient/client.go @@ -28,16 +28,17 @@ import ( var mon = monkit.Package() -// Client defines an interface for storing erasure coded data to piece store nodes +// Client defines an interface for storing erasure coded data to piece store nodes. 
type Client interface { Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) + PutSingleResult(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (results []*pb.SegmentPieceUploadResult, err error) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (ranger.Ranger, error) WithForceErrorDetection(force bool) Client // PutPiece is not intended to be used by normal uplinks directly, but is exported to support storagenode graceful exit transfers. PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, id *identity.PeerIdentity, err error) } -type dialPiecestoreFunc func(context.Context, *pb.Node) (*piecestore.Client, error) +type dialPiecestoreFunc func(context.Context, storj.NodeURL) (*piecestore.Client, error) type ecClient struct { log *zap.Logger @@ -46,7 +47,7 @@ type ecClient struct { forceErrorDetection bool } -// NewClient from the given identity and max buffer memory +// NewClient from the given identity and max buffer memory. func NewClient(log *zap.Logger, dialer rpc.Dialer, memoryLimit int) Client { return &ecClient{ log: log, @@ -60,9 +61,9 @@ func (ec *ecClient) WithForceErrorDetection(force bool) Client { return ec } -func (ec *ecClient) dialPiecestore(ctx context.Context, n *pb.Node) (*piecestore.Client, error) { - logger := ec.log.Named(n.Id.String()) - return piecestore.Dial(ctx, ec.dialer, n, logger, piecestore.DefaultConfig) +func (ec *ecClient) dialPiecestore(ctx context.Context, n storj.NodeURL) (*piecestore.Client, error) { + logger := ec.log.Named(n.ID.String()) + return piecestore.DialNodeURL(ctx, ec.dialer, n, logger, piecestore.DefaultConfig) } func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) { @@ -175,6 +176,32 @@ func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, p return successfulNodes, successfulHashes, nil } +func (ec *ecClient) PutSingleResult(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (results []*pb.SegmentPieceUploadResult, err error) { + successfulNodes, successfulHashes, err := ec.Put(ctx, limits, privateKey, rs, data, expiration) + if err != nil { + return nil, err + } + + uploadResults := make([]*pb.SegmentPieceUploadResult, 0, len(successfulNodes)) + for i := range successfulNodes { + if successfulNodes[i] == nil { + continue + } + + uploadResults = append(uploadResults, &pb.SegmentPieceUploadResult{ + PieceNum: int32(i), + NodeId: successfulNodes[i].Id, + Hash: successfulHashes[i], + }) + } + + if l := len(uploadResults); l < rs.OptimalThreshold() { + return nil, Error.New("uploaded results (%d) are below the optimal threshold (%d)", l, rs.OptimalThreshold()) + } + + return uploadResults, nil +} + func (ec *ecClient) PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash 
*pb.PieceHash, peerID *identity.PeerIdentity, err error) { nodeName := "nil" if limit != nil { @@ -190,9 +217,9 @@ func (ec *ecClient) PutPiece(ctx, parent context.Context, limit *pb.AddressedOrd storageNodeID := limit.GetLimit().StorageNodeId pieceID := limit.GetLimit().PieceId - ps, err := ec.dialPiecestore(ctx, &pb.Node{ - Id: storageNodeID, - Address: limit.GetStorageNodeAddress(), + ps, err := ec.dialPiecestore(ctx, storj.NodeURL{ + ID: storageNodeID, + Address: limit.GetStorageNodeAddress().Address, }) if err != nil { ec.log.Debug("Failed dialing for putting piece to node", @@ -323,12 +350,12 @@ type lazyPieceRanger struct { size int64 } -// Size implements Ranger.Size +// Size implements Ranger.Size. func (lr *lazyPieceRanger) Size() int64 { return lr.size } -// Range implements Ranger.Range to be lazily connected +// Range implements Ranger.Range to be lazily connected. func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { defer mon.Task()(&ctx)(&err) @@ -374,9 +401,9 @@ func (lr *lazyPieceReader) Read(data []byte) (_ int, err error) { func (lr *lazyPieceRanger) dial(ctx context.Context, offset, length int64) (_ *piecestore.Client, _ piecestore.Downloader, err error) { defer mon.Task()(&ctx)(&err) - ps, err := lr.dialPiecestore(ctx, &pb.Node{ - Id: lr.limit.GetLimit().StorageNodeId, - Address: lr.limit.GetStorageNodeAddress(), + ps, err := lr.dialPiecestore(ctx, storj.NodeURL{ + ID: lr.limit.GetLimit().StorageNodeId, + Address: lr.limit.GetStorageNodeAddress().Address, }) if err != nil { return nil, nil, err diff --git a/vendor/storj.io/uplink/private/ecclient/common.go b/vendor/storj.io/uplink/private/ecclient/common.go index 7d734b05e..02b04bf03 100644 --- a/vendor/storj.io/uplink/private/ecclient/common.go +++ b/vendor/storj.io/uplink/private/ecclient/common.go @@ -7,5 +7,5 @@ import ( "github.com/zeebo/errs" ) -// Error is the errs class of standard Ranger errors +// Error is the errs class of standard Ranger errors. var Error = errs.Class("ecclient error") diff --git a/vendor/storj.io/uplink/private/eestream/common.go b/vendor/storj.io/uplink/private/eestream/common.go index 3c2a26201..de6c489d3 100644 --- a/vendor/storj.io/uplink/private/eestream/common.go +++ b/vendor/storj.io/uplink/private/eestream/common.go @@ -7,5 +7,5 @@ import ( "github.com/zeebo/errs" ) -// Error is the default eestream errs class +// Error is the default eestream errs class. var Error = errs.Class("eestream error") diff --git a/vendor/storj.io/uplink/private/eestream/encode.go b/vendor/storj.io/uplink/private/eestream/encode.go index 421d66752..474db919d 100644 --- a/vendor/storj.io/uplink/private/eestream/encode.go +++ b/vendor/storj.io/uplink/private/eestream/encode.go @@ -44,14 +44,14 @@ type ErasureScheme interface { // from Decode. StripeSize() int - // Encode will generate this many erasure shares and therefore this many pieces + // Encode will generate this many erasure shares and therefore this many pieces. TotalCount() int - // Decode requires at least this many pieces + // Decode requires at least this many pieces. RequiredCount() int } -// RedundancyStrategy is an ErasureScheme with a repair and optimal thresholds +// RedundancyStrategy is an ErasureScheme with a repair and optimal thresholds. 
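// Illustrative sketch (not part of the patch): piece store connections in this diff are
// now addressed with storj.NodeURL and piecestore.DialNodeURL rather than *pb.Node and
// piecestore.Dial (see the PutPiece and lazyPieceRanger changes above). A hedged helper
// in the same spirit; the name dialForLimit is hypothetical.
package example

import (
	"context"

	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/rpc"
	"storj.io/common/storj"
	"storj.io/uplink/private/piecestore"
)

func dialForLimit(ctx context.Context, dialer rpc.Dialer, log *zap.Logger, limit *pb.AddressedOrderLimit) (*piecestore.Client, error) {
	nodeURL := storj.NodeURL{
		ID:      limit.GetLimit().StorageNodeId,        // node identity taken from the signed order limit
		Address: limit.GetStorageNodeAddress().Address, // dialable address delivered alongside the limit
	}
	return piecestore.DialNodeURL(ctx, dialer, nodeURL, log.Named(nodeURL.ID.String()), piecestore.DefaultConfig)
}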
type RedundancyStrategy struct { ErasureScheme repairThreshold int @@ -119,13 +119,13 @@ func NewRedundancyStrategyFromStorj(scheme storj.RedundancyScheme) (RedundancySt } // RepairThreshold is the number of available erasure pieces below which -// the data must be repaired to avoid loss +// the data must be repaired to avoid loss. func (rs *RedundancyStrategy) RepairThreshold() int { return rs.repairThreshold } // OptimalThreshold is the number of available erasure pieces above which -// there is no need for the data to be repaired +// there is no need for the data to be repaired. func (rs *RedundancyStrategy) OptimalThreshold() int { return rs.optimalThreshold } @@ -271,7 +271,7 @@ func (er *EncodedRanger) OutputSize() int64 { return blocks * int64(er.rs.ErasureShareSize()) } -// Range is like Ranger.Range, but returns a slice of Readers +// Range is like Ranger.Range, but returns a slice of Readers. func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) (_ []io.ReadCloser, err error) { defer mon.Task()(&ctx)(&err) // the offset and length given may not be block-aligned, so let's figure diff --git a/vendor/storj.io/uplink/private/eestream/stripe.go b/vendor/storj.io/uplink/private/eestream/stripe.go index 62b91bec4..743312d74 100644 --- a/vendor/storj.io/uplink/private/eestream/stripe.go +++ b/vendor/storj.io/uplink/private/eestream/stripe.go @@ -20,7 +20,7 @@ var ( mon = monkit.Package() ) -// StripeReader can read and decodes stripes from a set of readers +// StripeReader can read and decodes stripes from a set of readers. type StripeReader struct { scheme ErasureScheme cond *sync.Cond @@ -94,6 +94,8 @@ var backcompatMon = monkit.ScopeNamed("storj.io/storj/uplink/eestream") // ReadStripe reads and decodes the num-th stripe and concatenates it to p. The // return value is the updated byte slice. func (r *StripeReader) ReadStripe(ctx context.Context, num int64, p []byte) (_ []byte, err error) { + defer mon.Task()(&ctx, num)(&err) + for i := range r.inmap { delete(r.inmap, i) } diff --git a/vendor/storj.io/uplink/private/metainfo/batch.go b/vendor/storj.io/uplink/private/metainfo/batch.go index 9908a2694..4d3c8f52a 100644 --- a/vendor/storj.io/uplink/private/metainfo/batch.go +++ b/vendor/storj.io/uplink/private/metainfo/batch.go @@ -11,22 +11,22 @@ import ( ) var ( - // ErrInvalidType error for inalid response type casting + // ErrInvalidType error for inalid response type casting. ErrInvalidType = errs.New("invalid response type") ) -// BatchItem represents single request in batch +// BatchItem represents single request in batch. type BatchItem interface { BatchItem() *pb.BatchRequestItem } -// BatchResponse single response from batch call +// BatchResponse single response from batch call. type BatchResponse struct { pbRequest interface{} pbResponse interface{} } -// CreateBucket returns BatchResponse for CreateBucket request +// CreateBucket returns BatchResponse for CreateBucket request. func (resp *BatchResponse) CreateBucket() (CreateBucketResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketCreate) if !ok { @@ -40,7 +40,7 @@ func (resp *BatchResponse) CreateBucket() (CreateBucketResponse, error) { return createResponse, nil } -// GetBucket returns response for GetBucket request +// GetBucket returns response for GetBucket request. 
func (resp *BatchResponse) GetBucket() (GetBucketResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketGet) if !ok { @@ -53,7 +53,7 @@ func (resp *BatchResponse) GetBucket() (GetBucketResponse, error) { return getResponse, nil } -// ListBuckets returns response for ListBuckets request +// ListBuckets returns response for ListBuckets request. func (resp *BatchResponse) ListBuckets() (ListBucketsResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketList) if !ok { @@ -62,7 +62,7 @@ func (resp *BatchResponse) ListBuckets() (ListBucketsResponse, error) { return newListBucketsResponse(item.BucketList), nil } -// BeginObject returns response for BeginObject request +// BeginObject returns response for BeginObject request. func (resp *BatchResponse) BeginObject() (BeginObjectResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBegin) if !ok { @@ -77,7 +77,7 @@ func (resp *BatchResponse) BeginObject() (BeginObjectResponse, error) { return newBeginObjectResponse(item.ObjectBegin, rs), nil } -// BeginDeleteObject returns response for BeginDeleteObject request +// BeginDeleteObject returns response for BeginDeleteObject request. func (resp *BatchResponse) BeginDeleteObject() (BeginDeleteObjectResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBeginDelete) if !ok { @@ -86,7 +86,7 @@ func (resp *BatchResponse) BeginDeleteObject() (BeginDeleteObjectResponse, error return newBeginDeleteObjectResponse(item.ObjectBeginDelete), nil } -// GetObject returns response for GetObject request +// GetObject returns response for GetObject request. func (resp *BatchResponse) GetObject() (GetObjectResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectGet) if !ok { @@ -95,7 +95,7 @@ func (resp *BatchResponse) GetObject() (GetObjectResponse, error) { return newGetObjectResponse(item.ObjectGet), nil } -// ListObjects returns response for ListObjects request +// ListObjects returns response for ListObjects request. func (resp *BatchResponse) ListObjects() (ListObjectsResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectList) if !ok { @@ -110,7 +110,7 @@ func (resp *BatchResponse) ListObjects() (ListObjectsResponse, error) { return newListObjectsResponse(item.ObjectList, requestItem.ObjectList.EncryptedPrefix, requestItem.ObjectList.Recursive), nil } -// BeginSegment returns response for BeginSegment request +// BeginSegment returns response for BeginSegment request. func (resp *BatchResponse) BeginSegment() (BeginSegmentResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBegin) if !ok { @@ -120,7 +120,7 @@ func (resp *BatchResponse) BeginSegment() (BeginSegmentResponse, error) { return newBeginSegmentResponse(item.SegmentBegin), nil } -// BeginDeleteSegment returns response for BeginDeleteSegment request +// BeginDeleteSegment returns response for BeginDeleteSegment request. func (resp *BatchResponse) BeginDeleteSegment() (BeginDeleteSegmentResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBeginDelete) if !ok { @@ -130,7 +130,7 @@ func (resp *BatchResponse) BeginDeleteSegment() (BeginDeleteSegmentResponse, err return newBeginDeleteSegmentResponse(item.SegmentBeginDelete), nil } -// ListSegment returns response for ListSegment request +// ListSegment returns response for ListSegment request. 
func (resp *BatchResponse) ListSegment() (ListSegmentsResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentList) if !ok { @@ -139,7 +139,7 @@ func (resp *BatchResponse) ListSegment() (ListSegmentsResponse, error) { return newListSegmentsResponse(item.SegmentList), nil } -// DownloadSegment returns response for DownloadSegment request +// DownloadSegment returns response for DownloadSegment request. func (resp *BatchResponse) DownloadSegment() (DownloadSegmentResponse, error) { item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentDownload) if !ok { diff --git a/vendor/storj.io/uplink/private/metainfo/client.go b/vendor/storj.io/uplink/private/metainfo/client.go index 3473885de..57914e1cf 100644 --- a/vendor/storj.io/uplink/private/metainfo/client.go +++ b/vendor/storj.io/uplink/private/metainfo/client.go @@ -80,7 +80,7 @@ func DialNodeURL(ctx context.Context, dialer rpc.Dialer, nodeURL string, apiKey return nil, Error.New("node ID is required in node URL %q", nodeURL) } - conn, err := dialer.DialAddressID(ctx, url.Address, url.ID) + conn, err := dialer.DialNodeURL(ctx, url) if err != nil { return nil, Error.Wrap(err) } @@ -457,7 +457,7 @@ func (params *BeginObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectB } } -// BatchItem returns single item for batch request.... +// BatchItem returns single item for batch request. func (params *BeginObjectParams) BatchItem() *pb.BatchRequestItem { return &pb.BatchRequestItem{ Request: &pb.BatchRequestItem_ObjectBegin{ diff --git a/vendor/storj.io/uplink/private/metainfo/client_old.go b/vendor/storj.io/uplink/private/metainfo/client_old.go index ed8cf6c58..5c05b8e42 100644 --- a/vendor/storj.io/uplink/private/metainfo/client_old.go +++ b/vendor/storj.io/uplink/private/metainfo/client_old.go @@ -14,7 +14,7 @@ import ( "storj.io/common/uuid" ) -// CreateSegmentOld requests the order limits for creating a new segment +// CreateSegmentOld requests the order limits for creating a new segment. func (client *Client) CreateSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) { defer mon.Task()(&ctx)(&err) @@ -34,7 +34,7 @@ func (client *Client) CreateSegmentOld(ctx context.Context, bucket string, path return response.GetAddressedLimits(), response.RootPieceId, response.PrivateKey, nil } -// CommitSegmentOld requests to store the pointer for the segment +// CommitSegmentOld requests to store the pointer for the segment. func (client *Client) CommitSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, pointer *pb.Pointer, originalLimits []*pb.OrderLimit) (savedPointer *pb.Pointer, err error) { defer mon.Task()(&ctx)(&err) @@ -53,7 +53,7 @@ func (client *Client) CommitSegmentOld(ctx context.Context, bucket string, path return response.GetPointer(), nil } -// SegmentInfoOld requests the pointer of a segment +// SegmentInfoOld requests the pointer of a segment. 
func (client *Client) SegmentInfoOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, err error) { defer mon.Task()(&ctx)(&err) @@ -73,7 +73,7 @@ func (client *Client) SegmentInfoOld(ctx context.Context, bucket string, path st return response.GetPointer(), nil } -// ReadSegmentOld requests the order limits for reading a segment +// ReadSegmentOld requests the order limits for reading a segment. func (client *Client) ReadSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { defer mon.Task()(&ctx)(&err) @@ -93,7 +93,7 @@ func (client *Client) ReadSegmentOld(ctx context.Context, bucket string, path st return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), response.PrivateKey, nil } -// sortLimits sorts order limits and fill missing ones with nil values +// sortLimits sorts order limits and fill missing ones with nil values. func sortLimits(limits []*pb.AddressedOrderLimit, pointer *pb.Pointer) []*pb.AddressedOrderLimit { sorted := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal()) for _, piece := range pointer.GetRemote().GetRemotePieces() { @@ -111,7 +111,7 @@ func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID sto return nil } -// DeleteSegmentOld requests the order limits for deleting a segment +// DeleteSegmentOld requests the order limits for deleting a segment. func (client *Client) DeleteSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { defer mon.Task()(&ctx)(&err) @@ -131,7 +131,7 @@ func (client *Client) DeleteSegmentOld(ctx context.Context, bucket string, path return response.GetAddressedLimits(), response.PrivateKey, nil } -// ListSegmentsOld lists the available segments +// ListSegmentsOld lists the available segments. func (client *Client) ListSegmentsOld(ctx context.Context, bucket string, prefix, startAfter, ignoredEndBefore storj.Path, recursive bool, limit int32, metaFlags uint32) (items []ListItem, more bool, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go index 8b9d66072..5123898c8 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go @@ -13,7 +13,7 @@ import ( "storj.io/uplink/private/metainfo" ) -// CreateBucket creates a new bucket +// CreateBucket creates a new bucket. func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (_ storj.Bucket, err error) { defer mon.Task()(&ctx)(&err) @@ -78,7 +78,7 @@ func validateBlockSize(redundancyScheme storj.RedundancyScheme, blockSize int32) return nil } -// DeleteBucket deletes bucket +// DeleteBucket deletes bucket. func (db *Project) DeleteBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { defer mon.Task()(&ctx)(&err) @@ -95,7 +95,7 @@ func (db *Project) DeleteBucket(ctx context.Context, bucketName string) (_ storj return bucket, nil } -// GetBucket gets bucket information +// GetBucket gets bucket information. 
func (db *Project) GetBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { defer mon.Task()(&ctx)(&err) @@ -113,7 +113,7 @@ func (db *Project) GetBucket(ctx context.Context, bucketName string) (_ storj.Bu return bucket, nil } -// ListBuckets lists buckets +// ListBuckets lists buckets. func (db *Project) ListBuckets(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go index 6a6383aa6..32341760b 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go @@ -10,7 +10,7 @@ import ( "storj.io/common/storj" ) -// CreateObject has optional parameters that can be set +// CreateObject has optional parameters that can be set. type CreateObject struct { Metadata map[string]string ContentType string @@ -20,7 +20,7 @@ type CreateObject struct { storj.EncryptionParameters } -// Object converts the CreateObject to an object with unitialized values +// Object converts the CreateObject to an object with unitialized values. func (create CreateObject) Object(bucket storj.Bucket, path storj.Path) storj.Object { return storj.Object{ Bucket: bucket, @@ -40,7 +40,7 @@ func (create CreateObject) Object(bucket storj.Bucket, path storj.Path) storj.Ob } } -// ReadOnlyStream is an interface for reading segment information +// ReadOnlyStream is an interface for reading segment information. type ReadOnlyStream interface { Info() storj.Object @@ -52,23 +52,23 @@ type ReadOnlyStream interface { Segments(ctx context.Context, index int64, limit int64) (infos []storj.Segment, more bool, err error) } -// MutableObject is an interface for manipulating creating/deleting object stream +// MutableObject is an interface for manipulating creating/deleting object stream. type MutableObject interface { - // Info gets the current information about the object + // Info gets the current information about the object. Info() storj.Object - // CreateStream creates a new stream for the object + // CreateStream creates a new stream for the object. CreateStream(ctx context.Context) (MutableStream, error) // ContinueStream starts to continue a partially uploaded stream. ContinueStream(ctx context.Context) (MutableStream, error) - // DeleteStream deletes any information about this objects stream + // DeleteStream deletes any information about this objects stream. DeleteStream(ctx context.Context) error - // Commit commits the changes to the database + // Commit commits the changes to the database. Commit(ctx context.Context) error } -// MutableStream is an interface for manipulating stream information +// MutableStream is an interface for manipulating stream information. type MutableStream interface { BucketName() string Path() string diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go index 837c6daf4..b53e9fc33 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go @@ -22,7 +22,7 @@ var errClass = errs.Class("kvmetainfo") const defaultSegmentLimit = 8 // TODO -// DB implements metainfo database +// DB implements metainfo database. 
type DB struct { project *Project @@ -34,7 +34,7 @@ type DB struct { encStore *encryption.Store } -// New creates a new metainfo database +// New creates a new metainfo database. func New(project *Project, metainfo *metainfo.Client, streams streams.Store, segments segments.Store, encStore *encryption.Store) *DB { return &DB{ project: project, @@ -45,22 +45,22 @@ func New(project *Project, metainfo *metainfo.Client, streams streams.Store, seg } } -// CreateBucket creates a new bucket with the specified information +// CreateBucket creates a new bucket with the specified information. func (db *DB) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (bucketInfo storj.Bucket, err error) { return db.project.CreateBucket(ctx, bucketName, info) } -// DeleteBucket deletes bucket +// DeleteBucket deletes bucket. func (db *DB) DeleteBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { return db.project.DeleteBucket(ctx, bucketName) } -// GetBucket gets bucket information +// GetBucket gets bucket information. func (db *DB) GetBucket(ctx context.Context, bucketName string) (bucketInfo storj.Bucket, err error) { return db.project.GetBucket(ctx, bucketName) } -// ListBuckets lists buckets +// ListBuckets lists buckets. func (db *DB) ListBuckets(ctx context.Context, options storj.BucketListOptions) (list storj.BucketList, err error) { return db.project.ListBuckets(ctx, options) } diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go index 1b8e7e5eb..219c6742f 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go @@ -18,7 +18,7 @@ import ( "storj.io/uplink/private/storage/streams" ) -// DefaultRS default values for RedundancyScheme +// DefaultRS default values for RedundancyScheme. var DefaultRS = storj.RedundancyScheme{ Algorithm: storj.ReedSolomon, RequiredShares: 20, @@ -28,8 +28,8 @@ var DefaultRS = storj.RedundancyScheme{ ShareSize: 1 * memory.KiB.Int32(), } -// DefaultES default values for EncryptionParameters -// BlockSize should default to the size of a stripe +// DefaultES default values for EncryptionParameters. +// BlockSize should default to the size of a stripe. var DefaultES = storj.EncryptionParameters{ CipherSuite: storj.EncAESGCM, BlockSize: DefaultRS.StripeSize(), @@ -37,7 +37,7 @@ var DefaultES = storj.EncryptionParameters{ var contentTypeKey = "content-type" -// GetObject returns information about an object +// GetObject returns information about an object. func (db *DB) GetObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (info storj.Object, err error) { defer mon.Task()(&ctx)(&err) @@ -46,7 +46,7 @@ func (db *DB) GetObject(ctx context.Context, bucket storj.Bucket, path storj.Pat return info, err } -// GetObjectStream returns interface for reading the object stream +// GetObjectStream returns interface for reading the object stream. func (db *DB) GetObjectStream(ctx context.Context, bucket storj.Bucket, object storj.Object) (stream ReadOnlyStream, err error) { defer mon.Task()(&ctx)(&err) @@ -64,7 +64,7 @@ func (db *DB) GetObjectStream(ctx context.Context, bucket storj.Bucket, object s }, nil } -// CreateObject creates an uploading object and returns an interface for uploading Object information +// CreateObject creates an uploading object and returns an interface for uploading Object information. 
func (db *DB) CreateObject(ctx context.Context, bucket storj.Bucket, path storj.Path, createInfo *CreateObject) (object MutableObject, err error) { defer mon.Task()(&ctx)(&err) @@ -115,13 +115,13 @@ func (db *DB) CreateObject(ctx context.Context, bucket storj.Bucket, path storj. }, nil } -// ModifyObject modifies a committed object +// ModifyObject modifies a committed object. func (db *DB) ModifyObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) { defer mon.Task()(&ctx)(&err) return nil, errors.New("not implemented") } -// DeleteObject deletes an object from database +// DeleteObject deletes an object from database. func (db *DB) DeleteObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (_ storj.Object, err error) { defer mon.Task()(&ctx)(&err) @@ -147,19 +147,19 @@ func (db *DB) DeleteObject(ctx context.Context, bucket storj.Bucket, path storj. return obj, err } -// ModifyPendingObject creates an interface for updating a partially uploaded object +// ModifyPendingObject creates an interface for updating a partially uploaded object. func (db *DB) ModifyPendingObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) { defer mon.Task()(&ctx)(&err) return nil, errors.New("not implemented") } -// ListPendingObjects lists pending objects in bucket based on the ListOptions +// ListPendingObjects lists pending objects in bucket based on the ListOptions. func (db *DB) ListPendingObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { defer mon.Task()(&ctx)(&err) return storj.ObjectList{}, errors.New("not implemented") } -// ListObjects lists objects in bucket based on the ListOptions +// ListObjects lists objects in bucket based on the ListOptions. func (db *DB) ListObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { defer mon.Task()(&ctx)(&err) @@ -286,7 +286,7 @@ func (db *DB) ListObjects(ctx context.Context, bucket storj.Bucket, options stor return list, nil } -// ListObjectsExtended lists objects in bucket based on the ListOptions +// ListObjectsExtended lists objects in bucket based on the ListOptions. func (db *DB) ListObjectsExtended(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go index b14b3b75a..99a21326c 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go @@ -8,7 +8,7 @@ import ( "storj.io/uplink/private/storage/streams" ) -// Project implements project management operations +// Project implements project management operations. type Project struct { metainfo metainfo.Client streams streams.Store @@ -16,7 +16,7 @@ type Project struct { segmentsSize int64 } -// NewProject constructs a *Project +// NewProject constructs a *Project. 
func NewProject(streams streams.Store, encryptedBlockSize int32, segmentsSize int64, metainfo metainfo.Client) *Project { return &Project{ metainfo: metainfo, diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go index 19f66cbe6..4231de044 100644 --- a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go @@ -15,11 +15,11 @@ import ( ) var ( - // Error is the errs class of SetupProject + // Error is the errs class of SetupProject. Error = errs.Class("SetupProject error") ) -// SetupProject creates a project with temporary values until we can figure out how to bypass encryption related setup +// SetupProject creates a project with temporary values until we can figure out how to bypass encryption related setup. func SetupProject(m *metainfo.Client) (*Project, error) { maxBucketMetaSize := 10 * memory.MiB segment := segments.NewSegmentStore(m, nil) diff --git a/vendor/storj.io/uplink/private/piecestore/buffering.go b/vendor/storj.io/uplink/private/piecestore/buffering.go index ecc79a375..a5767cfd3 100644 --- a/vendor/storj.io/uplink/private/piecestore/buffering.go +++ b/vendor/storj.io/uplink/private/piecestore/buffering.go @@ -126,7 +126,7 @@ func (download *LockingDownload) Close() error { return download.download.Close() } -// GetHashAndLimit gets the download's hash and original order limit +// GetHashAndLimit gets the download's hash and original order limit. func (download *LockingDownload) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) { return download.download.GetHashAndLimit() } diff --git a/vendor/storj.io/uplink/private/piecestore/client.go b/vendor/storj.io/uplink/private/piecestore/client.go index b03a62ce3..257aab511 100644 --- a/vendor/storj.io/uplink/private/piecestore/client.go +++ b/vendor/storj.io/uplink/private/piecestore/client.go @@ -46,9 +46,9 @@ type Client struct { config Config } -// Dial dials the target piecestore endpoint. -func Dial(ctx context.Context, dialer rpc.Dialer, target *pb.Node, log *zap.Logger, config Config) (*Client, error) { - conn, err := dialer.DialNode(ctx, target) +// DialNodeURL dials the target piecestore endpoint. +func DialNodeURL(ctx context.Context, dialer rpc.Dialer, nodeURL storj.NodeURL, log *zap.Logger, config Config) (*Client, error) { + conn, err := dialer.DialNodeURL(ctx, nodeURL) if err != nil { return nil, Error.Wrap(err) } @@ -61,30 +61,6 @@ func Dial(ctx context.Context, dialer rpc.Dialer, target *pb.Node, log *zap.Logg }, nil } -// Delete uses delete order limit to delete a piece on piece store. -// -// DEPRECATED in favor of DeletePieces. -func (client *Client) Delete(ctx context.Context, limit *pb.OrderLimit, privateKey storj.PiecePrivateKey) (err error) { - defer mon.Task()(&ctx)(&err) - _, err = client.client.Delete(ctx, &pb.PieceDeleteRequest{ - Limit: limit, - }) - return Error.Wrap(err) -} - -// DeletePieces deletes a set of pieces. -func (client *Client) DeletePieces(ctx context.Context, ids ...storj.PieceID) (err error) { - defer mon.Task()(&ctx)(&err) - if len(ids) == 0 { - // Avoid RPC calls if no pieces to delete. - return nil - } - _, err = client.client.DeletePieces(ctx, &pb.DeletePiecesRequest{ - PieceIds: ids, - }) - return Error.Wrap(err) -} - // Retain uses a bloom filter to tell the piece store which pieces to keep. 
func (client *Client) Retain(ctx context.Context, req *pb.RetainRequest) (err error) { defer mon.Task()(&ctx)(&err) @@ -97,7 +73,7 @@ func (client *Client) Close() error { return client.conn.Close() } -// GetPeerIdentity gets the connection's peer identity +// GetPeerIdentity gets the connection's peer identity. func (client *Client) GetPeerIdentity() (*identity.PeerIdentity, error) { return client.conn.PeerIdentity() } diff --git a/vendor/storj.io/uplink/private/piecestore/verification.go b/vendor/storj.io/uplink/private/piecestore/verification.go index c8cae8455..7e588d3a8 100644 --- a/vendor/storj.io/uplink/private/piecestore/verification.go +++ b/vendor/storj.io/uplink/private/piecestore/verification.go @@ -24,7 +24,7 @@ var ( ErrProtocol = errs.Class("protocol") // ErrVerifyUntrusted is an error in case there is a trust issue. ErrVerifyUntrusted = errs.Class("untrusted") - // ErrStorageNodeInvalidResponse is an error when a storage node returns a response with invalid data + // ErrStorageNodeInvalidResponse is an error when a storage node returns a response with invalid data. ErrStorageNodeInvalidResponse = errs.Class("storage node has returned an invalid response") ) diff --git a/vendor/storj.io/uplink/private/storage/segments/common.go b/vendor/storj.io/uplink/private/storage/segments/common.go index dacf74239..92be9928f 100644 --- a/vendor/storj.io/uplink/private/storage/segments/common.go +++ b/vendor/storj.io/uplink/private/storage/segments/common.go @@ -7,5 +7,5 @@ import ( "github.com/zeebo/errs" ) -// Error is the errs class of standard segment errors +// Error is the errs class of standard segment errors. var Error = errs.Class("segment error") diff --git a/vendor/storj.io/uplink/private/storage/segments/size.go b/vendor/storj.io/uplink/private/storage/segments/size.go index 90208f375..3fa8b9305 100644 --- a/vendor/storj.io/uplink/private/storage/segments/size.go +++ b/vendor/storj.io/uplink/private/storage/segments/size.go @@ -18,7 +18,7 @@ func SizeReader(r io.Reader) *SizedReader { return &SizedReader{r: r} } -// Read implements io.Reader.Read +// Read implements io.Reader.Read. func (r *SizedReader) Read(p []byte) (n int, err error) { n, err = r.r.Read(p) r.size += int64(n) diff --git a/vendor/storj.io/uplink/private/storage/segments/store.go b/vendor/storj.io/uplink/private/storage/segments/store.go index e7701f84e..f3a9e8fa5 100644 --- a/vendor/storj.io/uplink/private/storage/segments/store.go +++ b/vendor/storj.io/uplink/private/storage/segments/store.go @@ -11,7 +11,6 @@ import ( "time" "github.com/spacemonkeygo/monkit/v3" - "github.com/vivint/infectious" "storj.io/common/pb" "storj.io/common/ranger" @@ -25,7 +24,7 @@ var ( mon = monkit.Package() ) -// Meta info about a segment +// Meta info about a segment. type Meta struct { Modified time.Time Expiration time.Time @@ -33,7 +32,7 @@ type Meta struct { Data []byte } -// Store for segments +// Store for segments. type Store interface { // Ranger creates a ranger for downloading erasure codes from piece store nodes. Ranger(ctx context.Context, info storj.SegmentDownloadInfo, limits []*pb.AddressedOrderLimit, objectRS storj.RedundancyScheme) (ranger.Ranger, error) @@ -47,7 +46,7 @@ type segmentStore struct { rng *rand.Rand } -// NewSegmentStore creates a new instance of segmentStore +// NewSegmentStore creates a new instance of segmentStore. 
func NewSegmentStore(metainfo *metainfo.Client, ec ecclient.Client) Store { return &segmentStore{ metainfo: metainfo, @@ -56,33 +55,17 @@ func NewSegmentStore(metainfo *metainfo.Client, ec ecclient.Client) Store { } } -// Put uploads a segment to an erasure code client +// Put uploads a segment to an erasure code client. func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.Time, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy) (_ []*pb.SegmentPieceUploadResult, size int64, err error) { defer mon.Task()(&ctx)(&err) - sizedReader := SizeReader(NewPeekThresholdReader(data)) - successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, piecePrivateKey, rs, sizedReader, expiration) + sizedReader := SizeReader(data) + results, err := s.ec.PutSingleResult(ctx, limits, piecePrivateKey, rs, sizedReader, expiration) if err != nil { return nil, size, Error.Wrap(err) } - uploadResults := make([]*pb.SegmentPieceUploadResult, 0, len(successfulNodes)) - for i := range successfulNodes { - if successfulNodes[i] == nil { - continue - } - uploadResults = append(uploadResults, &pb.SegmentPieceUploadResult{ - PieceNum: int32(i), - NodeId: successfulNodes[i].Id, - Hash: successfulHashes[i], - }) - } - - if l := len(uploadResults); l < rs.OptimalThreshold() { - return nil, size, Error.New("uploaded results (%d) are below the optimal threshold (%d)", l, rs.OptimalThreshold()) - } - - return uploadResults, sizedReader.Size(), nil + return results, sizedReader.Size(), nil } // Ranger creates a ranger for downloading erasure codes from piece store nodes. @@ -116,12 +99,7 @@ func (s *segmentStore) Ranger( } } - fc, err := infectious.NewFEC(int(objectRS.RequiredShares), int(objectRS.TotalShares)) - if err != nil { - return nil, err - } - es := eestream.NewRSScheme(fc, int(objectRS.ShareSize)) - redundancy, err := eestream.NewRedundancyStrategy(es, int(objectRS.RepairShares), int(objectRS.OptimalShares)) + redundancy, err := eestream.NewRedundancyStrategyFromStorj(objectRS) if err != nil { return nil, err } diff --git a/vendor/storj.io/uplink/private/storage/streams/eof.go b/vendor/storj.io/uplink/private/storage/streams/eof.go index a68c43ca6..316954e30 100644 --- a/vendor/storj.io/uplink/private/storage/streams/eof.go +++ b/vendor/storj.io/uplink/private/storage/streams/eof.go @@ -5,14 +5,14 @@ package streams import "io" -// EOFReader holds reader and status of EOF +// EOFReader holds reader and status of EOF. type EOFReader struct { reader io.Reader eof bool err error } -// NewEOFReader keeps track of the state, has the internal reader reached EOF +// NewEOFReader keeps track of the state, has the internal reader reached EOF. func NewEOFReader(r io.Reader) *EOFReader { return &EOFReader{reader: r} } diff --git a/vendor/storj.io/uplink/private/storage/streams/path.go b/vendor/storj.io/uplink/private/storage/streams/path.go index d041a79d4..4faf52728 100644 --- a/vendor/storj.io/uplink/private/storage/streams/path.go +++ b/vendor/storj.io/uplink/private/storage/streams/path.go @@ -10,7 +10,7 @@ import ( "storj.io/common/storj" ) -// Path is a representation of an object path within a bucket +// Path is a representation of an object path within a bucket. 
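// Illustrative sketch (not part of the patch): segmentStore.Put above now hands the
// per-piece bookkeeping to ecclient's new PutSingleResult, which uploads the erasure
// shares, skips nodes that failed, and errors out when fewer than rs.OptimalThreshold()
// pieces were stored. A hedged caller-side sketch; the name uploadSegment is
// hypothetical and the limits/keys are assumed to come from the usual metainfo
// BeginSegment exchange.
package example

import (
	"context"
	"io"
	"time"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/uplink/private/ecclient"
	"storj.io/uplink/private/eestream"
	"storj.io/uplink/private/storage/segments"
)

func uploadSegment(ctx context.Context, ec ecclient.Client, limits []*pb.AddressedOrderLimit,
	privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy,
	data io.Reader, expiration time.Time) ([]*pb.SegmentPieceUploadResult, int64, error) {

	sized := segments.SizeReader(data) // tracks how many plaintext bytes were actually consumed
	results, err := ec.PutSingleResult(ctx, limits, privateKey, rs, sized, expiration)
	if err != nil {
		return nil, 0, err
	}
	return results, sized.Size(), nil
}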
type Path struct { bucket string unencPath paths.Unencrypted diff --git a/vendor/storj.io/uplink/private/storage/segments/peek.go b/vendor/storj.io/uplink/private/storage/streams/peek.go similarity index 86% rename from vendor/storj.io/uplink/private/storage/segments/peek.go rename to vendor/storj.io/uplink/private/storage/streams/peek.go index 6018752ad..01ffec82b 100644 --- a/vendor/storj.io/uplink/private/storage/segments/peek.go +++ b/vendor/storj.io/uplink/private/storage/streams/peek.go @@ -1,9 +1,13 @@ // Copyright (C) 2019 Storj Labs, Inc. // See LICENSE for copying information. -package segments +package streams -import "io" +import ( + "io" + + "github.com/zeebo/errs" +) // PeekThresholdReader allows a check to see if the size of a given reader // exceeds the maximum inline segment size or not. @@ -14,7 +18,7 @@ type PeekThresholdReader struct { readCalled bool } -// NewPeekThresholdReader creates a new instance of PeekThresholdReader +// NewPeekThresholdReader creates a new instance of PeekThresholdReader. func NewPeekThresholdReader(r io.Reader) (pt *PeekThresholdReader) { return &PeekThresholdReader{r: r} } @@ -38,10 +42,10 @@ func (pt *PeekThresholdReader) Read(p []byte) (n int, err error) { // is larger than the given threshold or not. func (pt *PeekThresholdReader) IsLargerThan(thresholdSize int) (bool, error) { if pt.isLargerCalled { - return false, Error.New("IsLargerThan can't be called more than once") + return false, errs.New("IsLargerThan can't be called more than once") } if pt.readCalled { - return false, Error.New("IsLargerThan can't be called after Read has been called") + return false, errs.New("IsLargerThan can't be called after Read has been called") } pt.isLargerCalled = true buf := make([]byte, thresholdSize+1) diff --git a/vendor/storj.io/uplink/private/storage/streams/shim.go b/vendor/storj.io/uplink/private/storage/streams/shim.go index a08df0265..f07b2dd03 100644 --- a/vendor/storj.io/uplink/private/storage/streams/shim.go +++ b/vendor/storj.io/uplink/private/storage/streams/shim.go @@ -20,7 +20,7 @@ type Metadata interface { Metadata() ([]byte, error) } -// Store interface methods for streams to satisfy to be a store +// Store interface methods for streams to satisfy to be a store. type Store interface { Get(ctx context.Context, path storj.Path, object storj.Object) (ranger.Ranger, error) Put(ctx context.Context, path storj.Path, data io.Reader, metadata Metadata, expiration time.Time) (Meta, error) diff --git a/vendor/storj.io/uplink/private/storage/streams/size.go b/vendor/storj.io/uplink/private/storage/streams/size.go index 5776a442c..3fe6e898e 100644 --- a/vendor/storj.io/uplink/private/storage/streams/size.go +++ b/vendor/storj.io/uplink/private/storage/streams/size.go @@ -5,13 +5,13 @@ package streams import "io" -// SizeReader holds reader and size read so far +// SizeReader holds reader and size read so far. type SizeReader struct { reader io.Reader size int64 } -// NewSizeReader keeps track of how much bytes are read from the reader +// NewSizeReader keeps track of how much bytes are read from the reader. func NewSizeReader(r io.Reader) *SizeReader { return &SizeReader{reader: r} } @@ -22,7 +22,7 @@ func (r *SizeReader) Read(p []byte) (n int, err error) { return n, err } -// Size returns the number of bytes read so far +// Size returns the number of bytes read so far. 
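// Illustrative sketch (not part of the patch): PeekThresholdReader moved from the
// segments package into streams and now reports misuse with errs.New instead of the
// segments Error class. Its contract is otherwise unchanged: IsLargerThan may be called
// only once, and only before the first Read. A hedged usage sketch mirroring how
// streamStore.Put below decides between inline and remote segments; the function name
// isRemoteSegment is hypothetical.
package example

import (
	"io"

	"storj.io/uplink/private/storage/streams"
)

func isRemoteSegment(segment io.Reader, inlineThreshold int) (io.Reader, bool, error) {
	peek := streams.NewPeekThresholdReader(segment)
	// Peeks up to inlineThreshold+1 bytes to answer the size question;
	// later Reads from peek still see the complete stream.
	isRemote, err := peek.IsLargerThan(inlineThreshold)
	if err != nil {
		return nil, false, err
	}
	return peek, isRemote, nil
}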
func (r *SizeReader) Size() int64 { return r.size } diff --git a/vendor/storj.io/uplink/private/storage/streams/store.go b/vendor/storj.io/uplink/private/storage/streams/store.go index a10f47c19..69c7ddcd9 100644 --- a/vendor/storj.io/uplink/private/storage/streams/store.go +++ b/vendor/storj.io/uplink/private/storage/streams/store.go @@ -25,7 +25,7 @@ import ( var mon = monkit.Package() -// Meta info about a stream +// Meta info about a stream. type Meta struct { Modified time.Time Expiration time.Time @@ -33,7 +33,7 @@ type Meta struct { Data []byte } -// Store interface methods for streams to satisfy to be a store +// Store interface methods for streams to satisfy to be a store. type typedStore interface { Get(ctx context.Context, path Path, object storj.Object) (ranger.Ranger, error) Put(ctx context.Context, path Path, data io.Reader, metadata Metadata, expiration time.Time) (Meta, error) @@ -157,7 +157,7 @@ func (s *streamStore) Put(ctx context.Context, path Path, data io.Reader, metada sizeReader := NewSizeReader(eofReader) segmentReader := io.LimitReader(sizeReader, s.segmentSize) - peekReader := segments.NewPeekThresholdReader(segmentReader) + peekReader := NewPeekThresholdReader(segmentReader) // If the data is larger than the inline threshold size, then it will be a remote segment isRemote, err := peekReader.IsLargerThan(s.inlineThreshold) if err != nil { @@ -287,7 +287,7 @@ func (s *streamStore) Put(ctx context.Context, path Path, data io.Reader, metada return Meta{}, err } - // encrypt metadata with the content encryption key and zero nonce + // encrypt metadata with the content encryption key and zero nonce. encryptedStreamInfo, err := encryption.Encrypt(streamInfo, s.cipher, &contentKey, &storj.Nonce{}) if err != nil { return Meta{}, err @@ -414,7 +414,7 @@ func (s *streamStore) Get(ctx context.Context, path Path, object storj.Object) ( return ranger.Concat(rangers...), nil } -// Delete all the segments, with the last one last +// Delete all the segments, with the last one last. func (s *streamStore) Delete(ctx context.Context, path Path) (_ storj.ObjectInfo, err error) { defer mon.Task()(&ctx)(&err) @@ -430,7 +430,7 @@ func (s *streamStore) Delete(ctx context.Context, path Path) (_ storj.ObjectInfo return object, err } -// ListItem is a single item in a listing +// ListItem is a single item in a listing. type ListItem struct { Path string Meta Meta @@ -522,7 +522,7 @@ func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, c return encryption.Unpad(rd, int(rd.Size()-decryptedSize)) } -// CancelHandler handles clean up of segments on receiving CTRL+C +// CancelHandler handles clean up of segments on receiving CTRL+C. func (s *streamStore) cancelHandler(ctx context.Context, path Path) { defer mon.Task()(&ctx)(nil) @@ -544,7 +544,7 @@ func getEncryptedKeyAndNonce(m *pb.SegmentMeta) (storj.EncryptedPrivateKey, *sto return m.EncryptedKey, &nonce } -// TypedDecryptStreamInfo decrypts stream info +// TypedDecryptStreamInfo decrypts stream info. 
func TypedDecryptStreamInfo(ctx context.Context, streamMetaBytes []byte, path Path, encStore *encryption.Store) ( _ *pb.StreamInfo, streamMeta pb.StreamMeta, err error) { defer mon.Task()(&ctx)(&err) diff --git a/vendor/storj.io/uplink/private/stream/common.go b/vendor/storj.io/uplink/private/stream/common.go index 6fad5b54d..a9d33faee 100644 --- a/vendor/storj.io/uplink/private/stream/common.go +++ b/vendor/storj.io/uplink/private/stream/common.go @@ -7,5 +7,5 @@ import ( "github.com/zeebo/errs" ) -// Error is the errs class of stream errors +// Error is the errs class of stream errors. var Error = errs.Class("stream error") diff --git a/vendor/storj.io/uplink/private/testuplink/uplink.go b/vendor/storj.io/uplink/private/testuplink/uplink.go new file mode 100644 index 000000000..9ed157c4b --- /dev/null +++ b/vendor/storj.io/uplink/private/testuplink/uplink.go @@ -0,0 +1,23 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package testuplink + +import ( + "context" + + "storj.io/common/memory" +) + +type segmentSizeKey struct{} + +// WithMaxSegmentSize creates context with max segment size for testing purposes. +func WithMaxSegmentSize(ctx context.Context, segmentSize memory.Size) context.Context { + return context.WithValue(ctx, segmentSizeKey{}, segmentSize) +} + +// GetMaxSegmentSize returns max segment size from context if exists. +func GetMaxSegmentSize(ctx context.Context) (memory.Size, bool) { + segmentSize, ok := ctx.Value(segmentSizeKey{}).(memory.Size) + return segmentSize, ok +} diff --git a/vendor/storj.io/uplink/project.go b/vendor/storj.io/uplink/project.go index 2e56ef897..4b56f163e 100644 --- a/vendor/storj.io/uplink/project.go +++ b/vendor/storj.io/uplink/project.go @@ -20,10 +20,11 @@ import ( "storj.io/uplink/private/metainfo/kvmetainfo" "storj.io/uplink/private/storage/segments" "storj.io/uplink/private/storage/streams" + "storj.io/uplink/private/testuplink" ) // maxSegmentSize can be used to override max segment size with ldflags build parameter. -// Example: go build -ldflags "-X 'storj.io/uplink.maxSegmentSize=1MiB'" storj.io/storj/cmd/uplink +// Example: go build -ldflags "-X 'storj.io/uplink.maxSegmentSize=1MiB'" storj.io/storj/cmd/uplink. var maxSegmentSize string // Project provides access to managing buckets and objects. @@ -47,7 +48,7 @@ func OpenProject(ctx context.Context, access *Access) (*Project, error) { // OpenProject opens a project with the specific access grant. func (config Config) OpenProject(ctx context.Context, access *Access) (project *Project, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) if access == nil { return nil, packageError.New("access grant is nil") @@ -90,6 +91,11 @@ func (config Config) OpenProject(ctx context.Context, access *Access) (project * if err != nil { return nil, packageError.Wrap(err) } + } else { + s, ok := testuplink.GetMaxSegmentSize(ctx) + if ok { + segmentsSize = s.Int64() + } } // TODO: This should come from the EncryptionAccess. For now it's hardcoded to twice the diff --git a/vendor/storj.io/uplink/upload.go b/vendor/storj.io/uplink/upload.go index 8e818ad08..c72a073f8 100644 --- a/vendor/storj.io/uplink/upload.go +++ b/vendor/storj.io/uplink/upload.go @@ -28,7 +28,7 @@ type UploadOptions struct { // UploadObject starts an upload to the specific key. 
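// Illustrative sketch (not part of the patch): the new testuplink package lets tests
// override the maximum segment size through the context; Config.OpenProject above
// consults testuplink.GetMaxSegmentSize whenever the storj.io/uplink.maxSegmentSize
// ldflag is not set. A hedged usage sketch; openSmallSegmentProject is hypothetical.
package example

import (
	"context"

	"storj.io/common/memory"
	"storj.io/uplink"
	"storj.io/uplink/private/testuplink"
)

func openSmallSegmentProject(ctx context.Context, access *uplink.Access) (*uplink.Project, error) {
	// Ask for 1 MiB segments for this project only; the value travels in the context.
	ctx = testuplink.WithMaxSegmentSize(ctx, 1*memory.MiB)
	return uplink.OpenProject(ctx, access)
}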
func (project *Project) UploadObject(ctx context.Context, bucket, key string, options *UploadOptions) (upload *Upload, err error) { - defer mon.Func().ResetTrace(&ctx)(&err) + defer mon.Func().RestartTrace(&ctx)(&err) if bucket == "" { return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) @@ -44,10 +44,7 @@ func (project *Project) UploadObject(ctx context.Context, bucket, key string, op b := storj.Bucket{Name: bucket} obj, err := project.db.CreateObject(ctx, b, key, nil) if err != nil { - if storj.ErrNoPath.Has(err) { - return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) - } - return nil, convertKnownErrors(err, bucket) + return nil, convertKnownErrors(err, bucket, key) } info := obj.Info() @@ -110,7 +107,7 @@ func (upload *Upload) Commit() error { return errwrapf("%w: already committed", ErrUploadDone) } - return convertKnownErrors(err, upload.bucket) + return convertKnownErrors(err, upload.bucket, upload.object.Key) } // Abort aborts the upload.
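// Illustrative sketch (not part of the patch): StatObject, DeleteObject, and UploadObject
// in this diff now funnel every failure through convertKnownErrors(err, bucket, key)
// instead of special-casing storj.ErrNoPath and storj.ErrObjectNotFound inline. Callers
// keep distinguishing the cases via the exported error values and errors.Is, assuming
// the usual storj.io/uplink v1 sentinels; the function name below is hypothetical.
package example

import (
	"context"
	"errors"
	"fmt"

	"storj.io/uplink"
)

func describeObject(ctx context.Context, project *uplink.Project, bucket, key string) error {
	info, err := project.StatObject(ctx, bucket, key)
	switch {
	case err == nil:
		fmt.Printf("%s is %d bytes\n", info.Key, info.System.ContentLength)
		return nil
	case errors.Is(err, uplink.ErrObjectNotFound):
		return fmt.Errorf("no object %q in bucket %q: %w", key, bucket, err)
	case errors.Is(err, uplink.ErrBucketNotFound):
		return fmt.Errorf("no bucket %q: %w", bucket, err)
	default:
		return err
	}
}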