backend/tardigrade: Upgrade to uplink v1.0.6

This fixes an important listing bug that affects users with more
than 500 objects in a single listing operation.
Caleb Case 2020-05-29 09:08:11 -04:00 committed by Nick Craig-Wood
parent 764b90a519
commit e7bd392a69
154 changed files with 1242 additions and 1028 deletions
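
For context on the listing fix: the tardigrade backend lists bucket contents through uplink's object iterator, which fetches results from the satellite in pages, which is presumably why only listings of more than 500 objects (more than one page) were affected. Below is a minimal sketch of such a listing loop against the uplink v1 API; the access grant, bucket name and option values are illustrative placeholders, not taken from rclone's backend.

package main

import (
	"context"
	"fmt"
	"log"

	"storj.io/uplink"
)

func main() {
	ctx := context.Background()

	// Placeholder serialized access grant; a real one comes from the
	// Tardigrade satellite UI or the uplink CLI.
	access, err := uplink.ParseAccess("ACCESS_GRANT")
	if err != nil {
		log.Fatal(err)
	}

	project, err := uplink.OpenProject(ctx, access)
	if err != nil {
		log.Fatal(err)
	}
	defer project.Close()

	// The iterator pages through results internally; listings with more
	// than 500 objects need several pages, which is the case the
	// upgraded uplink fixes.
	objects := project.ListObjects(ctx, "my-bucket", &uplink.ListObjectsOptions{
		Recursive: true,
		System:    true, // include system metadata such as object size
	})
	for objects.Next() {
		item := objects.Item()
		fmt.Println(item.Key, item.System.ContentLength)
	}
	if err := objects.Err(); err != nil {
		log.Fatal(err)
	}
}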

go.mod (2 changes)

@@ -67,7 +67,7 @@ require (
google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e
google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect
gopkg.in/yaml.v2 v2.2.8
-storj.io/uplink v1.0.5
+storj.io/uplink v1.0.6
)
go 1.13

go.sum (16 changes)

@@ -359,8 +359,8 @@ github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1/go.mod h1:7NL
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
-github.com/spacemonkeygo/monkit/v3 v3.0.6 h1:BKPrEaLokVAxlwHkD7jawViBa/IU9/bgXbZLWgjbdSM=
-github.com/spacemonkeygo/monkit/v3 v3.0.6/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
+github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI=
+github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@@ -657,9 +657,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-storj.io/common v0.0.0-20200429074521-4ba140e4b747 h1:Ne1x0M80uNyN6tHIs15CGJqHbreKbvH5BOq4jdWsqMc=
-storj.io/common v0.0.0-20200429074521-4ba140e4b747/go.mod h1:lfsaMdtHwrUOtSYkw73meCyZMUYiaFBKVqx6zgeSz2o=
-storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw=
-storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
-storj.io/uplink v1.0.5 h1:RH6LUZQPOJZ01JpM0YmghDu5xyex5gyn32NSCzsPSr4=
-storj.io/uplink v1.0.5/go.mod h1:GkChEUgHFuUR2WNpqFw3NeqglXz6/zp6n5Rxt0IVfHw=
+storj.io/common v0.0.0-20200519171747-3ff8acf78c46 h1:Yx73D928PKtyQYPXHuQ5WFES4t+0nufxbhwyf8VodMw=
+storj.io/common v0.0.0-20200519171747-3ff8acf78c46/go.mod h1:hqUDJlDHU1kZuZmfLohWgGa0Cf3pL1IH8DsxLCsamNQ=
+storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA=
+storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
+storj.io/uplink v1.0.6 h1:UztG/bSJitJt2c5Ofq9SmQ+a/yrLiHcPVR9gLfzcWG0=
+storj.io/uplink v1.0.6/go.mod h1:F+AeZR4l9F9A7K1K+GXJJarwaKOQK0RWmbyy/maMFEI=


@@ -272,6 +272,33 @@ func (f *Func) ResetTrace(ctx *context.Context,
return exit
}
// RestartTrace is like Func.Task, except it always creates a new Trace and inherient
// all tags from the existing trace.
func (f *Func) RestartTrace(ctx *context.Context, args ...interface{}) func(*error) {
existingSpan := SpanFromCtx(*ctx)
if existingSpan == nil {
return f.ResetTrace(ctx, args)
}
existingTrace := existingSpan.Trace()
if existingTrace == nil {
return f.ResetTrace(ctx, args)
}
ctx = cleanCtx(ctx)
if ctx == &taskSecret && taskArgs(f, args) {
return nil
}
trace := NewTrace(NewId())
trace.copyFrom(existingTrace)
f.scope.r.observeTrace(trace)
s, exit := newSpan(*ctx, f, args, trace.Id(), trace)
if ctx != &unparented {
*ctx = s
}
return exit
}
var unparented = context.Background()
func cleanCtx(ctx *context.Context) *context.Context {


@@ -108,6 +108,17 @@ func removeObserverFrom(parent **spanObserverTuple, ref *spanObserverTuple) (
// Id returns the id of the Trace
func (t *Trace) Id() int64 { return t.id }
// GetAll returns values associated with a trace. See SetAll.
func (t *Trace) GetAll() (val map[interface{}]interface{}) {
t.mtx.Lock()
defer t.mtx.Unlock()
new := make(map[interface{}]interface{}, len(t.vals))
for k, v := range t.vals {
new[k] = v
}
return new
}
// Get returns a value associated with a key on a trace. See Set.
func (t *Trace) Get(key interface{}) (val interface{}) {
t.mtx.Lock()
@@ -129,6 +140,14 @@ func (t *Trace) Set(key, val interface{}) {
t.mtx.Unlock()
}
// copyFrom replace all key/value on a trace with a new sets of key/value.
func (t *Trace) copyFrom(s *Trace) {
vals := s.GetAll()
t.mtx.Lock()
defer t.mtx.Unlock()
t.vals = vals
}
func (t *Trace) incrementSpans() { atomic.AddInt64(&t.spanCount, 1) }
func (t *Trace) decrementSpans() { atomic.AddInt64(&t.spanCount, -1) }

vendor/modules.txt (vendored, 11 changes)

@@ -202,7 +202,7 @@ github.com/sirupsen/logrus
github.com/skratchdot/open-golang/open
# github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1
github.com/spacemonkeygo/errors
-# github.com/spacemonkeygo/monkit/v3 v3.0.6
+# github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752
github.com/spacemonkeygo/monkit/v3
github.com/spacemonkeygo/monkit/v3/monotime
# github.com/spf13/cobra v1.0.0
@@ -393,12 +393,11 @@ google.golang.org/grpc/status
google.golang.org/grpc/tap
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
-# storj.io/common v0.0.0-20200429074521-4ba140e4b747
+# storj.io/common v0.0.0-20200519171747-3ff8acf78c46
storj.io/common/encryption
storj.io/common/errs2
storj.io/common/fpath
storj.io/common/identity
-storj.io/common/internal/grpchook
storj.io/common/macaroon
storj.io/common/memory
storj.io/common/netutil
@@ -419,8 +418,9 @@ storj.io/common/signing
storj.io/common/storj
storj.io/common/sync2
storj.io/common/uuid
-# storj.io/drpc v0.0.11
+# storj.io/drpc v0.0.12
storj.io/drpc
+storj.io/drpc/drpccache
storj.io/drpc/drpcconn
storj.io/drpc/drpcctx
storj.io/drpc/drpcdebug
@@ -432,7 +432,7 @@ storj.io/drpc/drpcmux
storj.io/drpc/drpcsignal
storj.io/drpc/drpcstream
storj.io/drpc/drpcwire
-# storj.io/uplink v1.0.5
+# storj.io/uplink v1.0.6
storj.io/uplink
storj.io/uplink/internal/expose
storj.io/uplink/internal/telemetryclient
@@ -444,3 +444,4 @@ storj.io/uplink/private/piecestore
storj.io/uplink/private/storage/segments
storj.io/uplink/private/storage/streams
storj.io/uplink/private/stream
+storj.io/uplink/private/testuplink


@@ -130,7 +130,7 @@ func (s *aesgcmDecrypter) Transform(out, in []byte, blockNum int64) ([]byte, err
return plainData, nil
}
-// EncryptAESGCM encrypts byte data with a key and nonce. The cipher data is returned
+// EncryptAESGCM encrypts byte data with a key and nonce. It returns the cipher data.
func EncryptAESGCM(data []byte, key *storj.Key, nonce *AESGCMNonce) (cipherData []byte, err error) {
block, err := aes.NewCipher(key[:])
if err != nil {
@@ -144,7 +144,7 @@ func EncryptAESGCM(data []byte, key *storj.Key, nonce *AESGCMNonce) (cipherData
return cipherData, nil
}
-// DecryptAESGCM decrypts byte data with a key and nonce. The plain data is returned
+// DecryptAESGCM decrypts byte data with a key and nonce. It returns the plain data.
func DecryptAESGCM(cipherData []byte, key *storj.Key, nonce *AESGCMNonce) (data []byte, err error) {
if len(cipherData) == 0 {
return []byte{}, Error.New("empty cipher data")


@@ -8,11 +8,11 @@ import (
"github.com/zeebo/errs"
)
-// Error is the default encryption errs class
+// Error is the default encryption errs class.
var Error = errs.Class("encryption error")
-// ErrDecryptFailed is the errs class when the decryption fails
+// ErrDecryptFailed is the errs class when the decryption fails.
var ErrDecryptFailed = errs.Class("decryption failed, check encryption key")
-// ErrInvalidConfig is the errs class for invalid configuration
+// ErrInvalidConfig is the errs class for invalid configuration.
var ErrInvalidConfig = errs.Class("invalid encryption configuration")


@@ -11,28 +11,28 @@ import (
)
const (
-// AESGCMNonceSize is the size of an AES-GCM nonce
+// AESGCMNonceSize is the size of an AES-GCM nonce.
AESGCMNonceSize = 12
-// unit32Size is the number of bytes in the uint32 type
+// unit32Size is the number of bytes in the uint32 type.
uint32Size = 4
)
-// AESGCMNonce represents the nonce used by the AES-GCM protocol
+// AESGCMNonce represents the nonce used by the AES-GCM protocol.
type AESGCMNonce [AESGCMNonceSize]byte
-// ToAESGCMNonce returns the nonce as a AES-GCM nonce
+// ToAESGCMNonce returns the nonce as a AES-GCM nonce.
func ToAESGCMNonce(nonce *storj.Nonce) *AESGCMNonce {
aes := new(AESGCMNonce)
copy((*aes)[:], nonce[:AESGCMNonceSize])
return aes
}
-// Increment increments the nonce with the given amount
+// Increment increments the nonce with the given amount.
func Increment(nonce *storj.Nonce, amount int64) (truncated bool, err error) {
return incrementBytes(nonce[:], amount)
}
-// Encrypt encrypts data with the given cipher, key and nonce
+// Encrypt encrypts data with the given cipher, key and nonce.
func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) {
// Don't encrypt empty slice
if len(data) == 0 {
@@ -53,7 +53,7 @@ func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj
}
}
-// Decrypt decrypts cipherData with the given cipher, key and nonce
+// Decrypt decrypts cipherData with the given cipher, key and nonce.
func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) {
// Don't decrypt empty slice
if len(cipherData) == 0 {
@@ -74,7 +74,7 @@ func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce
}
}
-// NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it
+// NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it.
func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) {
switch cipher {
case storj.EncNull:
@@ -90,7 +90,7 @@ func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj
}
}
-// NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it
+// NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it.
func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) {
switch cipher {
case storj.EncNull:
@@ -106,12 +106,12 @@ func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj
}
}
-// EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce
+// EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce.
func EncryptKey(keyToEncrypt *storj.Key, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (storj.EncryptedPrivateKey, error) {
return Encrypt(keyToEncrypt[:], cipher, key, nonce)
}
-// DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce
+// DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce.
func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (*storj.Key, error) {
plainData, err := Decrypt(keyToDecrypt, cipher, key, nonce)
if err != nil {
@@ -124,7 +124,7 @@ func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite
return &decryptedKey, nil
}
-// DeriveKey derives new key from the given key and message using HMAC-SHA512
+// DeriveKey derives new key from the given key and message using HMAC-SHA512.
func DeriveKey(key *storj.Key, message string) (*storj.Key, error) {
mac := hmac.New(sha512.New, key[:])
_, err := mac.Write([]byte(message))


@@ -397,7 +397,7 @@ func decryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key)
// `\xff` escapes to `\xfe\x02`
// `\x00` escapes to `\x01\x01`
// `\x01` escapes to `\x01\x02
-// for more details see docs/design/path-component-encoding.md
+// for more details see docs/design/path-component-encoding.md.
func encodeSegment(segment []byte) []byte {
if len(segment) == 0 {
return emptyComponent


@@ -104,12 +104,12 @@ func (s *secretboxDecrypter) Transform(out, in []byte, blockNum int64) ([]byte,
return rv, nil
}
-// EncryptSecretBox encrypts byte data with a key and nonce. The cipher data is returned
+// EncryptSecretBox encrypts byte data with a key and nonce. The cipher data is returned.
func EncryptSecretBox(data []byte, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) {
return secretbox.Seal(nil, data, nonce.Raw(), key.Raw()), nil
}
-// DecryptSecretBox decrypts byte data with a key and nonce. The plain data is returned
+// DecryptSecretBox decrypts byte data with a key and nonce. The plain data is returned.
func DecryptSecretBox(cipherData []byte, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) {
data, success := secretbox.Open(nil, cipherData, nonce.Raw(), key.Raw())
if !success {


@@ -35,20 +35,20 @@ type transformedReader struct {
bytesRead int
}
-// NoopTransformer is a dummy Transformer that passes data through without modifying it
+// NoopTransformer is a dummy Transformer that passes data through without modifying it.
type NoopTransformer struct{}
-// InBlockSize is 1
+// InBlockSize is 1.
func (t *NoopTransformer) InBlockSize() int {
return 1
}
-// OutBlockSize is 1
+// OutBlockSize is 1.
func (t *NoopTransformer) OutBlockSize() int {
return 1
}
-// Transform returns the input without modification
+// Transform returns the input without modification.
func (t *NoopTransformer) Transform(out, in []byte, blockNum int64) ([]byte, error) {
return append(out, in...), nil
}
@@ -137,7 +137,7 @@ func (t *transformedRanger) Size() int64 {
// CalcEncompassingBlocks is a useful helper function that, given an offset,
// length, and blockSize, will tell you which blocks contain the requested
-// offset and length
+// offset and length.
func CalcEncompassingBlocks(offset, length int64, blockSize int) (
firstBlock, blockCount int64) {
firstBlock = offset / int64(blockSize)


@@ -9,7 +9,7 @@ import (
"github.com/zeebo/errs"
)
-// Collect returns first error from channel and all errors that happen within duration
+// Collect returns first error from channel and all errors that happen within duration.
func Collect(errch chan error, duration time.Duration) error {
errch = discardNil(errch)
errlist := []error{<-errch}
@@ -24,7 +24,7 @@ func Collect(errch chan error, duration time.Duration) error {
}
}
-// discard nil errors that are returned from services
+// discard nil errors that are returned from services.
func discardNil(ch chan error) chan error {
r := make(chan error)
go func() {


@@ -8,7 +8,6 @@ import (
"github.com/zeebo/errs"
-"storj.io/common/internal/grpchook"
"storj.io/common/rpc/rpcstatus"
)
@@ -16,7 +15,6 @@ import (
func IsCanceled(err error) bool {
return errs.IsFunc(err, func(err error) bool {
return err == context.Canceled ||
-grpchook.IsErrServerStopped(err) ||
rpcstatus.Code(err) == rpcstatus.Canceled
})
}


@@ -10,7 +10,7 @@ import (
"strings"
)
-//EditFile opens the best OS-specific text editor we can find
+//EditFile opens the best OS-specific text editor we can find.
func EditFile(fileToEdit string) error {
editorPath := getEditorPath()
if editorPath == "" {

vendor/storj.io/common/fpath/os.go (generated, vendored, 8 changes)

@@ -15,7 +15,7 @@ import (
"github.com/zeebo/errs"
)
-// IsRoot returns whether path is the root directory
+// IsRoot returns whether path is the root directory.
func IsRoot(path string) bool {
abs, err := filepath.Abs(path)
if err == nil {
@@ -25,7 +25,7 @@ func IsRoot(path string) bool {
return filepath.Dir(path) == path
}
-// ApplicationDir returns best base directory for specific OS
+// ApplicationDir returns best base directory for specific OS.
func ApplicationDir(subdir ...string) string {
for i := range subdir {
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
@@ -62,7 +62,7 @@ func ApplicationDir(subdir ...string) string {
return filepath.Join(append([]string{appdir}, subdir...)...)
}
-// IsValidSetupDir checks if directory is valid for setup configuration
+// IsValidSetupDir checks if directory is valid for setup configuration.
func IsValidSetupDir(name string) (ok bool, err error) {
_, err = os.Stat(name)
if err != nil {
@@ -100,7 +100,7 @@ func IsValidSetupDir(name string) (ok bool, err error) {
}
}
-// IsWritable determines if a directory is writeable
+// IsWritable determines if a directory is writeable.
func IsWritable(filepath string) (bool, error) {
info, err := os.Stat(filepath)
if err != nil {

vendor/storj.io/common/fpath/path.go (generated, vendored, 14 changes)

@@ -50,7 +50,7 @@ func parseBucket(o string) (bucket, rest string) {
return "", o
}
-// New creates new FPath from the given URL
+// New creates new FPath from the given URL.
func New(p string) (FPath, error) {
fp := FPath{original: p}
@@ -101,7 +101,7 @@ func New(p string) (FPath, error) {
return fp, nil
}
-// Join is appends the given segment to the path
+// Join is appends the given segment to the path.
func (p FPath) Join(segment string) FPath {
if p.local {
p.original = filepath.Join(p.original, segment)
@@ -113,7 +113,7 @@ func (p FPath) Join(segment string) FPath {
return p
}
-// Base returns the last segment of the path
+// Base returns the last segment of the path.
func (p FPath) Base() string {
if p.local {
return filepath.Base(p.original)
@@ -124,12 +124,12 @@ func (p FPath) Base() string {
return path.Base(p.path)
}
-// Bucket returns the first segment of path
+// Bucket returns the first segment of path.
func (p FPath) Bucket() string {
return p.bucket
}
-// Path returns the URL path without the scheme
+// Path returns the URL path without the scheme.
func (p FPath) Path() string {
if p.local {
return p.original
@@ -137,12 +137,12 @@ func (p FPath) Path() string {
return p.path
}
-// IsLocal returns whether the path refers to local or remote location
+// IsLocal returns whether the path refers to local or remote location.
func (p FPath) IsLocal() bool {
return p.local
}
-// String returns the entire URL (untouched)
+// String returns the entire URL (untouched).
func (p FPath) String() string {
return p.original
}


@@ -11,7 +11,7 @@ import "context"
// other packages.
type key int
-// temp is the context key for temp struct
+// temp is the context key for temp struct.
const tempKey key = 0
type temp struct {
@@ -19,7 +19,7 @@ type temp struct {
directory string
}
-// WithTempData creates context with information how store temporary data, in memory or on disk
+// WithTempData creates context with information how store temporary data, in memory or on disk.
func WithTempData(ctx context.Context, directory string, inmemory bool) context.Context {
temp := temp{
inmemory: inmemory,
@@ -28,7 +28,7 @@ func WithTempData(ctx context.Context, directory string, inmemory bool) context.
return context.WithValue(ctx, tempKey, temp)
}
-// GetTempData returns if temporary data should be stored in memory or on disk
+// GetTempData returns if temporary data should be stored in memory or on disk.
func GetTempData(ctx context.Context) (string, bool, bool) {
tempValue, ok := ctx.Value(tempKey).(temp)
if !ok {


@@ -26,7 +26,7 @@ import (
const minimumLoggableDifficulty = 8
-// PeerCertificateAuthority represents the CA which is used to validate peer identities
+// PeerCertificateAuthority represents the CA which is used to validate peer identities.
type PeerCertificateAuthority struct {
RestChain []*x509.Certificate
// Cert is the x509 certificate of the CA
@@ -35,7 +35,7 @@ type PeerCertificateAuthority struct {
ID storj.NodeID
}
-// FullCertificateAuthority represents the CA which is used to author and validate full identities
+// FullCertificateAuthority represents the CA which is used to author and validate full identities.
type FullCertificateAuthority struct {
RestChain []*x509.Certificate
// Cert is the x509 certificate of the CA
@@ -46,7 +46,7 @@ type FullCertificateAuthority struct {
Key crypto.PrivateKey
}
-// CASetupConfig is for creating a CA
+// CASetupConfig is for creating a CA.
type CASetupConfig struct {
VersionNumber uint `default:"0" help:"which identity version to use (0 is latest)"`
ParentCertPath string `help:"path to the parent authority's certificate chain"`
@@ -59,7 +59,7 @@ type CASetupConfig struct {
Concurrency uint `help:"number of concurrent workers for certificate authority generation" default:"4"`
}
-// NewCAOptions is used to pass parameters to `NewCA`
+// NewCAOptions is used to pass parameters to `NewCA`.
type NewCAOptions struct {
// VersionNumber is the IDVersion to use for the identity
VersionNumber storj.IDVersionNumber
@@ -75,18 +75,18 @@ type NewCAOptions struct {
Logger io.Writer
}
-// PeerCAConfig is for locating a CA certificate without a private key
+// PeerCAConfig is for locating a CA certificate without a private key.
type PeerCAConfig struct {
CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"`
}
-// FullCAConfig is for locating a CA certificate and it's private key
+// FullCAConfig is for locating a CA certificate and it's private key.
type FullCAConfig struct {
CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"`
KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"`
}
-// NewCA creates a new full identity with the given difficulty
+// NewCA creates a new full identity with the given difficulty.
func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority, err error) {
defer mon.Task()(&ctx)(&err)
var (
@@ -201,12 +201,12 @@ func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority,
return ca, nil
}
-// Status returns the status of the CA cert/key files for the config
+// Status returns the status of the CA cert/key files for the config.
func (caS CASetupConfig) Status() (TLSFilesStatus, error) {
return statTLSFiles(caS.CertPath, caS.KeyPath)
}
-// Create generates and saves a CA using the config
+// Create generates and saves a CA using the config.
func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCertificateAuthority, error) {
var (
err error
@@ -249,7 +249,7 @@ func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCer
return ca, caC.Save(ca)
}
-// FullConfig converts a `CASetupConfig` to `FullCAConfig`
+// FullConfig converts a `CASetupConfig` to `FullCAConfig`.
func (caS CASetupConfig) FullConfig() FullCAConfig {
return FullCAConfig{
CertPath: caS.CertPath,
@@ -257,7 +257,7 @@ func (caS CASetupConfig) FullConfig() FullCAConfig {
}
}
-// Load loads a CA from the given configuration
+// Load loads a CA from the given configuration.
func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) {
p, err := fc.PeerConfig().Load()
if err != nil {
@@ -281,14 +281,14 @@ func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) {
}, nil
}
-// PeerConfig converts a full ca config to a peer ca config
+// PeerConfig converts a full ca config to a peer ca config.
func (fc FullCAConfig) PeerConfig() PeerCAConfig {
return PeerCAConfig{
CertPath: fc.CertPath,
}
}
-// Save saves a CA with the given configuration
+// Save saves a CA with the given configuration.
func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error {
var (
keyData bytes.Buffer
@@ -313,7 +313,7 @@ func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error {
return writeErrs.Err()
}
-// SaveBackup saves the certificate of the config wth a timestamped filename
+// SaveBackup saves the certificate of the config wth a timestamped filename.
func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error {
return FullCAConfig{
CertPath: backupPath(fc.CertPath),
@@ -321,7 +321,7 @@ func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error {
}.Save(ca)
}
-// Load loads a CA from the given configuration
+// Load loads a CA from the given configuration.
func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) {
chainPEM, err := ioutil.ReadFile(pc.CertPath)
if err != nil {
@@ -350,7 +350,7 @@ func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) {
}, nil
}
-// Save saves a peer CA (cert, no key) with the given configuration
+// Save saves a peer CA (cert, no key) with the given configuration.
func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error {
var (
certData bytes.Buffer
@@ -373,7 +373,7 @@ func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error {
return nil
}
-// SaveBackup saves the certificate of the config wth a timestamped filename
+// SaveBackup saves the certificate of the config wth a timestamped filename.
func (pc PeerCAConfig) SaveBackup(ca *PeerCertificateAuthority) error {
return PeerCAConfig{
CertPath: backupPath(pc.CertPath),
@@ -422,12 +422,12 @@ func (ca *FullCertificateAuthority) NewIdentity(exts ...pkix.Extension) (*FullId
}
-// Chain returns the CA's certificate chain
+// Chain returns the CA's certificate chain.
func (ca *FullCertificateAuthority) Chain() []*x509.Certificate {
return append([]*x509.Certificate{ca.Cert}, ca.RestChain...)
}
-// RawChain returns the CA's certificate chain as a 2d byte slice
+// RawChain returns the CA's certificate chain as a 2d byte slice.
func (ca *FullCertificateAuthority) RawChain() [][]byte {
chain := ca.Chain()
rawChain := make([][]byte, len(chain))
@@ -437,7 +437,7 @@ func (ca *FullCertificateAuthority) RawChain() [][]byte {
return rawChain
}
-// RawRestChain returns the "rest" (excluding `ca.Cert`) of the certificate chain as a 2d byte slice
+// RawRestChain returns the "rest" (excluding `ca.Cert`) of the certificate chain as a 2d byte slice.
func (ca *FullCertificateAuthority) RawRestChain() [][]byte {
var chain [][]byte
for _, cert := range ca.RestChain {
@@ -446,7 +446,7 @@ func (ca *FullCertificateAuthority) RawRestChain() [][]byte {
return chain
}
-// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority
+// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority.
func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority {
return &PeerCertificateAuthority{
Cert: ca.Cert,
@@ -455,7 +455,7 @@ func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority {
}
}
-// Sign signs the passed certificate with ca certificate
+// Sign signs the passed certificate with ca certificate.
func (ca *FullCertificateAuthority) Sign(cert *x509.Certificate) (*x509.Certificate, error) {
signedCert, err := peertls.CreateCertificate(cert.PublicKey, ca.Key, cert, ca.Cert)
if err != nil {


@@ -50,7 +50,7 @@ func GenerateKey(ctx context.Context, minDifficulty uint16, version storj.IDVers
}
// GenerateCallback indicates that key generation is done when done is true.
-// if err != nil key generation will stop with that error
+// if err != nil key generation will stop with that error.
type GenerateCallback func(crypto.PrivateKey, storj.NodeID) (done bool, err error)
// GenerateKeys continues to generate keys until found returns done == false,


@@ -295,12 +295,12 @@ func NewManageableFullIdentity(ident *FullIdentity, ca *FullCertificateAuthority
}
}
-// Status returns the status of the identity cert/key files for the config
+// Status returns the status of the identity cert/key files for the config.
func (is SetupConfig) Status() (TLSFilesStatus, error) {
return statTLSFiles(is.CertPath, is.KeyPath)
}
-// Create generates and saves a CA using the config
+// Create generates and saves a CA using the config.
func (is SetupConfig) Create(ca *FullCertificateAuthority) (*FullIdentity, error) {
fi, err := ca.NewIdentity()
if err != nil {
@@ -314,7 +314,7 @@ func (is SetupConfig) Create(ca *FullCertificateAuthority) (*FullIdentity, error
return fi, ic.Save(fi)
}
-// FullConfig converts a `SetupConfig` to `Config`
+// FullConfig converts a `SetupConfig` to `Config`.
func (is SetupConfig) FullConfig() Config {
return Config{
CertPath: is.CertPath,
@@ -322,7 +322,7 @@ func (is SetupConfig) FullConfig() Config {
}
}
-// Load loads a FullIdentity from the config
+// Load loads a FullIdentity from the config.
func (ic Config) Load() (*FullIdentity, error) {
c, err := ioutil.ReadFile(ic.CertPath)
if err != nil {
@@ -340,7 +340,7 @@ func (ic Config) Load() (*FullIdentity, error) {
return fi, nil
}
-// Save saves a FullIdentity according to the config
+// Save saves a FullIdentity according to the config.
func (ic Config) Save(fi *FullIdentity) error {
var (
certData, keyData bytes.Buffer
@@ -371,7 +371,7 @@ func (ic Config) Save(fi *FullIdentity) error {
)
}
-// SaveBackup saves the certificate of the config with a timestamped filename
+// SaveBackup saves the certificate of the config with a timestamped filename.
func (ic Config) SaveBackup(fi *FullIdentity) error {
return Config{
CertPath: backupPath(ic.CertPath),
@@ -379,14 +379,14 @@ func (ic Config) SaveBackup(fi *FullIdentity) error {
}.Save(fi)
}
-// PeerConfig converts a Config to a PeerConfig
+// PeerConfig converts a Config to a PeerConfig.
func (ic Config) PeerConfig() *PeerConfig {
return &PeerConfig{
CertPath: ic.CertPath,
}
}
-// Load loads a PeerIdentity from the config
+// Load loads a PeerIdentity from the config.
func (ic PeerConfig) Load() (*PeerIdentity, error) {
c, err := ioutil.ReadFile(ic.CertPath)
if err != nil {
@@ -399,7 +399,7 @@ func (ic PeerConfig) Load() (*PeerIdentity, error) {
return pi, nil
}
-// Save saves a PeerIdentity according to the config
+// Save saves a PeerIdentity according to the config.
func (ic PeerConfig) Save(peerIdent *PeerIdentity) error {
chain := []*x509.Certificate{peerIdent.Leaf, peerIdent.CA}
chain = append(chain, peerIdent.RestChain...)
@@ -417,19 +417,19 @@ func (ic PeerConfig) Save(peerIdent *PeerIdentity) error {
return nil
}
-// SaveBackup saves the certificate of the config with a timestamped filename
+// SaveBackup saves the certificate of the config with a timestamped filename.
func (ic PeerConfig) SaveBackup(pi *PeerIdentity) error {
return PeerConfig{
CertPath: backupPath(ic.CertPath),
}.Save(pi)
}
-// Chain returns the Identity's certificate chain
+// Chain returns the Identity's certificate chain.
func (fi *FullIdentity) Chain() []*x509.Certificate {
return append([]*x509.Certificate{fi.Leaf, fi.CA}, fi.RestChain...)
}
-// RawChain returns all of the certificate chain as a 2d byte slice
+// RawChain returns all of the certificate chain as a 2d byte slice.
func (fi *FullIdentity) RawChain() [][]byte {
chain := fi.Chain()
rawChain := make([][]byte, len(chain))
@@ -439,7 +439,7 @@ func (fi *FullIdentity) RawChain() [][]byte {
return rawChain
}
-// RawRestChain returns the rest (excluding leaf and CA) of the certificate chain as a 2d byte slice
+// RawRestChain returns the rest (excluding leaf and CA) of the certificate chain as a 2d byte slice.
func (fi *FullIdentity) RawRestChain() [][]byte {
rawChain := make([][]byte, len(fi.RestChain))
for _, cert := range fi.RestChain {
@@ -448,7 +448,7 @@ func (fi *FullIdentity) RawRestChain() [][]byte {
return rawChain
}
-// PeerIdentity converts a FullIdentity into a PeerIdentity
+// PeerIdentity converts a FullIdentity into a PeerIdentity.
func (fi *FullIdentity) PeerIdentity() *PeerIdentity {
return &PeerIdentity{
CA: fi.CA,
@@ -508,7 +508,7 @@ func backupPath(path string) string {
)
}
-// EncodePeerIdentity encodes the complete identity chain to bytes
+// EncodePeerIdentity encodes the complete identity chain to bytes.
func EncodePeerIdentity(pi *PeerIdentity) []byte {
var chain []byte
chain = append(chain, pi.Leaf.Raw...)
@@ -519,7 +519,7 @@ func EncodePeerIdentity(pi *PeerIdentity) []byte {
return chain
}
-// DecodePeerIdentity Decodes the bytes into complete identity chain
+// DecodePeerIdentity Decodes the bytes into complete identity chain.
func DecodePeerIdentity(ctx context.Context, chain []byte) (_ *PeerIdentity, err error) {
defer mon.Task()(&ctx)(&err)


@@ -13,10 +13,10 @@ import (
"storj.io/common/fpath"
)
-// TLSFilesStatus is the status of keys
+// TLSFilesStatus is the status of keys.
type TLSFilesStatus int
-// Four possible outcomes for four files
+// Four possible outcomes for four files.
const (
NoCertNoKey = TLSFilesStatus(iota)
CertNoKey
@@ -25,11 +25,11 @@ const (
)
var (
-// ErrZeroBytes is returned for zero slice
+// ErrZeroBytes is returned for zero slice.
ErrZeroBytes = errs.New("byte slice was unexpectedly empty")
)
-// writeChainData writes data to path ensuring permissions are appropriate for a cert
+// writeChainData writes data to path ensuring permissions are appropriate for a cert.
func writeChainData(path string, data []byte) error {
err := writeFile(path, 0744, 0644, data)
if err != nil {
@@ -38,7 +38,7 @@ func writeChainData(path string, data []byte) error {
return nil
}
-// writeKeyData writes data to path ensuring permissions are appropriate for a cert
+// writeKeyData writes data to path ensuring permissions are appropriate for a cert.
func writeKeyData(path string, data []byte) error {
err := writeFile(path, 0700, 0600, data)
if err != nil {
@@ -47,7 +47,7 @@ func writeKeyData(path string, data []byte) error {
return nil
}
-// writeFile writes to path, creating directories and files with the necessary permissions
+// writeFile writes to path, creating directories and files with the necessary permissions.
func writeFile(path string, dirmode, filemode os.FileMode, data []byte) error {
if err := os.MkdirAll(filepath.Dir(path), dirmode); err != nil {
return errs.Wrap(err)


@@ -1,51 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// Package grpchook exists to avoid introducing a dependency to
// grpc unless pb/pbgrpc is imported in other packages.
package grpchook
import (
"context"
"crypto/tls"
"errors"
"net"
)
// ErrNotHooked is returned from funcs when pb/pbgrpc hasn't been imported.
var ErrNotHooked = errors.New("grpc not hooked")
// HookedErrServerStopped is the grpc.ErrServerStopped when initialized.
var HookedErrServerStopped error
// IsErrServerStopped returns when err == grpc.ErrServerStopped and
// pb/pbgrpc has been imported.
func IsErrServerStopped(err error) bool {
if HookedErrServerStopped == nil {
return false
}
return HookedErrServerStopped == err
}
// HookedInternalFromContext returns grpc peer information from context.
var HookedInternalFromContext func(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error)
// InternalFromContext returns the peer that was previously associated by NewContext using grpc.
func InternalFromContext(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error) {
if HookedInternalFromContext == nil {
return nil, tls.ConnectionState{}, ErrNotHooked
}
return HookedInternalFromContext(ctx)
}
// StatusCode is rpcstatus.Code, however it cannot use it directly without introducing a
// circular dependency.
type StatusCode uint64
// HookedErrorWrap is the func to wrap a status code.
var HookedErrorWrap func(code StatusCode, err error) error
// HookedConvertToStatusCode tries to convert grpc error status to rpcstatus.StatusCode
var HookedConvertToStatusCode func(err error) (StatusCode, bool)


@@ -16,40 +16,40 @@ import (
)
var (
-// Error is a general API Key error
+// Error is a general API Key error.
Error = errs.Class("api key error")
-// ErrFormat means that the structural formatting of the API Key is invalid
+// ErrFormat means that the structural formatting of the API Key is invalid.
ErrFormat = errs.Class("api key format error")
-// ErrInvalid means that the API Key is improperly signed
+// ErrInvalid means that the API Key is improperly signed.
ErrInvalid = errs.Class("api key invalid error")
-// ErrUnauthorized means that the API key does not grant the requested permission
+// ErrUnauthorized means that the API key does not grant the requested permission.
ErrUnauthorized = errs.Class("api key unauthorized error")
-// ErrRevoked means the API key has been revoked
+// ErrRevoked means the API key has been revoked.
ErrRevoked = errs.Class("api key revocation error")
mon = monkit.Package()
)
-// ActionType specifies the operation type being performed that the Macaroon will validate
+// ActionType specifies the operation type being performed that the Macaroon will validate.
type ActionType int
const (
-// not using iota because these values are persisted in macaroons
+// not using iota because these values are persisted in macaroons.
_ ActionType = 0
-// ActionRead specifies a read operation
+// ActionRead specifies a read operation.
ActionRead ActionType = 1
-// ActionWrite specifies a read operation
+// ActionWrite specifies a read operation.
ActionWrite ActionType = 2
-// ActionList specifies a read operation
+// ActionList specifies a read operation.
ActionList ActionType = 3
-// ActionDelete specifies a read operation
+// ActionDelete specifies a read operation.
ActionDelete ActionType = 4
-// ActionProjectInfo requests project-level information
+// ActionProjectInfo requests project-level information.
ActionProjectInfo ActionType = 5
)
-// Action specifies the specific operation being performed that the Macaroon will validate
+// Action specifies the specific operation being performed that the Macaroon will validate.
type Action struct {
Op ActionType
Bucket []byte
@@ -86,8 +86,8 @@ func ParseRawAPIKey(data []byte) (*APIKey, error) {
return &APIKey{mac: mac}, nil
}
-// NewAPIKey generates a brand new unrestricted API key given the provided
-// server project secret
+// NewAPIKey generates a brand new unrestricted API key given the provided.
+// server project secret.
func NewAPIKey(secret []byte) (*APIKey, error) {
mac, err := NewUnrestricted(secret)
if err != nil {
@@ -134,13 +134,13 @@ func (a *APIKey) Check(ctx context.Context, secret []byte, action Action, revoke
// AllowedBuckets stores information about which buckets are
// allowed to be accessed, where `Buckets` stores names of buckets that are
-// allowed and `All` is a bool that indicates if all buckets are allowed or not
+// allowed and `All` is a bool that indicates if all buckets are allowed or not.
type AllowedBuckets struct {
All bool
Buckets map[string]struct{}
}
-// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation
+// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation.
func (a *APIKey) GetAllowedBuckets(ctx context.Context, action Action) (allowed AllowedBuckets, err error) {
defer mon.Task()(&ctx)(&err)
@@ -211,12 +211,12 @@ func (a *APIKey) Tail() []byte {
return a.mac.Tail()
}
-// Serialize serializes the API Key to a string
+// Serialize serializes the API Key to a string.
func (a *APIKey) Serialize() string {
return base58.CheckEncode(a.mac.Serialize(), 0)
}
-// SerializeRaw serialize the API Key to raw bytes
+// SerializeRaw serialize the API Key to raw bytes.
func (a *APIKey) SerializeRaw() []byte {
return a.mac.Serialize()
}


@@ -10,14 +10,14 @@ import (
"crypto/subtle"
)
-// Macaroon is a struct that determine contextual caveats and authorization
+// Macaroon is a struct that determine contextual caveats and authorization.
type Macaroon struct {
head []byte
caveats [][]byte
tail []byte
}
-// NewUnrestricted creates Macaroon with random Head and generated Tail
+// NewUnrestricted creates Macaroon with random Head and generated Tail.
func NewUnrestricted(secret []byte) (*Macaroon, error) {
head, err := NewSecret()
if err != nil {
@@ -40,7 +40,7 @@ func sign(secret []byte, data []byte) []byte {
return signer.Sum(nil)
}
-// NewSecret generates cryptographically random 32 bytes
+// NewSecret generates cryptographically random 32 bytes.
func NewSecret() (secret []byte, err error) {
secret = make([]byte, 32)
@@ -52,7 +52,7 @@ func NewSecret() (secret []byte, err error) {
return secret, nil
}
-// AddFirstPartyCaveat creates signed macaroon with appended caveat
+// AddFirstPartyCaveat creates signed macaroon with appended caveat.
func (m *Macaroon) AddFirstPartyCaveat(c []byte) (macaroon *Macaroon, err error) {
macaroon = m.Copy()
@@ -63,7 +63,7 @@ func (m *Macaroon) AddFirstPartyCaveat(c []byte) (macaroon *Macaroon, err error)
}
// Validate reconstructs with all caveats from the secret and compares tails,
-// returning true if the tails match
+// returning true if the tails match.
func (m *Macaroon) Validate(secret []byte) (ok bool) {
tail := sign(secret, m.head)
for _, cav := range m.caveats {
@@ -73,7 +73,7 @@ func (m *Macaroon) Validate(secret []byte) (ok bool) {
return subtle.ConstantTimeCompare(tail, m.tail) == 1
}
-// Tails returns all ancestor tails up to and including the current tail
+// Tails returns all ancestor tails up to and including the current tail.
func (m *Macaroon) Tails(secret []byte) [][]byte {
tails := make([][]byte, 0, len(m.caveats)+1)
tail := sign(secret, m.head)
@@ -85,7 +85,7 @@ func (m *Macaroon) Tails(secret []byte) [][]byte {
return tails
}
-// Head returns copy of macaroon head
+// Head returns copy of macaroon head.
func (m *Macaroon) Head() (head []byte) {
if len(m.head) == 0 {
return nil
@@ -93,12 +93,12 @@ func (m *Macaroon) Head() (head []byte) {
return append([]byte(nil), m.head...)
}
-// CaveatLen returns the number of caveats this macaroon has
+// CaveatLen returns the number of caveats this macaroon has.
func (m *Macaroon) CaveatLen() int {
return len(m.caveats)
}
-// Caveats returns copy of macaroon caveats
+// Caveats returns copy of macaroon caveats.
func (m *Macaroon) Caveats() (caveats [][]byte) {
if len(m.caveats) == 0 {
return nil
@@ -110,7 +110,7 @@ func (m *Macaroon) Caveats() (caveats [][]byte) {
return caveats
}
-// Tail returns copy of macaroon tail
+// Tail returns copy of macaroon tail.
func (m *Macaroon) Tail() (tail []byte) {
if len(m.tail) == 0 {
return nil
@@ -118,7 +118,7 @@ func (m *Macaroon) Tail() (tail []byte) {
return append([]byte(nil), m.tail...)
}
-// Copy return copy of macaroon
+// Copy return copy of macaroon.
func (m *Macaroon) Copy() *Macaroon {
return &Macaroon{
head: m.Head(),

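The macaroon helpers above (NewUnrestricted, AddFirstPartyCaveat, Validate, Tails) all hinge on one chained-HMAC construction: the tail is re-signed once for every caveat appended. The sketch below is only an illustration of that chain, not the vendored code; it assumes HMAC-SHA256, and the head and caveat payloads are made up.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// sign plays the role of the helper shown above: an HMAC of data keyed with secret.
func sign(secret, data []byte) []byte {
	mac := hmac.New(sha256.New, secret)
	mac.Write(data)
	return mac.Sum(nil)
}

func main() {
	secret := []byte("root secret held by the issuer") // illustrative secret
	head := []byte("random head bytes")                // NewUnrestricted uses NewSecret() here
	caveats := [][]byte{
		[]byte(`{"allowed_bucket":"photos"}`), // hypothetical caveat payloads
		[]byte(`{"not_after":"2020-12-31"}`),
	}

	// Minting: sign the head, then re-sign the previous tail over each caveat,
	// as AddFirstPartyCaveat does.
	tail := sign(secret, head)
	for _, c := range caveats {
		tail = sign(tail, c)
	}

	// Validate: rebuild the same chain from the secret and compare the tails.
	check := sign(secret, head)
	for _, c := range caveats {
		check = sign(check, c)
	}
	fmt.Println("tails match:", hmac.Equal(tail, check))
}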

@ -27,7 +27,7 @@ type packet struct {
data []byte data []byte
} }
// Serialize converts macaroon to binary format // Serialize converts macaroon to binary format.
func (m *Macaroon) Serialize() (data []byte) { func (m *Macaroon) Serialize() (data []byte) {
// Start data from version int // Start data from version int
data = append(data, version) data = append(data, version)
@ -59,7 +59,7 @@ func (m *Macaroon) Serialize() (data []byte) {
return data return data
} }
// serializePacket converts packet to binary // serializePacket converts packet to binary.
func serializePacket(data []byte, p packet) []byte { func serializePacket(data []byte, p packet) []byte {
data = appendVarint(data, int(p.fieldType)) data = appendVarint(data, int(p.fieldType))
data = appendVarint(data, len(p.data)) data = appendVarint(data, len(p.data))
@ -75,7 +75,7 @@ func appendVarint(data []byte, x int) []byte {
return append(data, buf[:n]...) return append(data, buf[:n]...)
} }
// ParseMacaroon converts binary to macaroon // ParseMacaroon converts binary to macaroon.
func ParseMacaroon(data []byte) (_ *Macaroon, err error) { func ParseMacaroon(data []byte) (_ *Macaroon, err error) {
if len(data) < 2 { if len(data) < 2 {
return nil, errors.New("empty macaroon") return nil, errors.New("empty macaroon")
@ -152,7 +152,7 @@ func ParseMacaroon(data []byte) (_ *Macaroon, err error) {
return &mac, nil return &mac, nil
} }
// parseSection returns data leftover and packet array // parseSection returns data leftover and packet array.
func parseSection(data []byte) ([]byte, []packet, error) { func parseSection(data []byte) ([]byte, []packet, error) {
prevFieldType := fieldType(-1) prevFieldType := fieldType(-1)
var packets []packet var packets []packet
@ -176,7 +176,7 @@ func parseSection(data []byte) ([]byte, []packet, error) {
} }
} }
// parsePacket returns data leftover and packet // parsePacket returns data leftover and packet.
func parsePacket(data []byte) ([]byte, packet, error) { func parsePacket(data []byte) ([]byte, packet, error) {
data, ft, err := parseVarint(data) data, ft, err := parseVarint(data)
if err != nil { if err != nil {

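serializePacket and appendVarint above describe a simple length-prefixed framing: each packet is a varint field type, a varint payload length, then the raw payload bytes, appended after a leading version marker. A rough sketch of that framing, assuming unsigned encoding/binary varints; the version value and field type below are made up for illustration.

package main

import (
	"encoding/binary"
	"fmt"
)

// appendUvarint plays the role of appendVarint above.
func appendUvarint(data []byte, x uint64) []byte {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], x)
	return append(data, buf[:n]...)
}

// packetBytes frames one packet: <field type><payload length><payload>.
func packetBytes(data []byte, fieldType uint64, payload []byte) []byte {
	data = appendUvarint(data, fieldType)
	data = appendUvarint(data, uint64(len(payload)))
	return append(data, payload...)
}

func main() {
	// Hypothetical: version marker 2, then a single packet of field type 1.
	out := packetBytes([]byte{2}, 1, []byte("example caveat"))
	fmt.Printf("% x\n", out)
}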

@ -10,7 +10,7 @@ import (
"strings" "strings"
) )
// base 2 and base 10 sizes // base 2 and base 10 sizes.
const ( const (
B Size = 1 << (10 * iota) B Size = 1 << (10 * iota)
KiB KiB
@ -28,55 +28,55 @@ const (
EB Size = 1e18 EB Size = 1e18
) )
// Size implements flag.Value for collecting memory size in bytes // Size implements flag.Value for collecting memory size in bytes.
type Size int64 type Size int64
// Int returns bytes size as int // Int returns bytes size as int.
func (size Size) Int() int { return int(size) } func (size Size) Int() int { return int(size) }
// Int32 returns bytes size as int32 // Int32 returns bytes size as int32.
func (size Size) Int32() int32 { return int32(size) } func (size Size) Int32() int32 { return int32(size) }
// Int64 returns bytes size as int64 // Int64 returns bytes size as int64.
func (size Size) Int64() int64 { return int64(size) } func (size Size) Int64() int64 { return int64(size) }
// Float64 returns bytes size as float64 // Float64 returns bytes size as float64.
func (size Size) Float64() float64 { return float64(size) } func (size Size) Float64() float64 { return float64(size) }
// KiB returns size in kibibytes // KiB returns size in kibibytes.
func (size Size) KiB() float64 { return size.Float64() / KiB.Float64() } func (size Size) KiB() float64 { return size.Float64() / KiB.Float64() }
// MiB returns size in mebibytes // MiB returns size in mebibytes.
func (size Size) MiB() float64 { return size.Float64() / MiB.Float64() } func (size Size) MiB() float64 { return size.Float64() / MiB.Float64() }
// GiB returns size in gibibytes // GiB returns size in gibibytes.
func (size Size) GiB() float64 { return size.Float64() / GiB.Float64() } func (size Size) GiB() float64 { return size.Float64() / GiB.Float64() }
// TiB returns size in tebibytes // TiB returns size in tebibytes.
func (size Size) TiB() float64 { return size.Float64() / TiB.Float64() } func (size Size) TiB() float64 { return size.Float64() / TiB.Float64() }
// PiB returns size in pebibytes // PiB returns size in pebibytes.
func (size Size) PiB() float64 { return size.Float64() / PiB.Float64() } func (size Size) PiB() float64 { return size.Float64() / PiB.Float64() }
// EiB returns size in exbibytes // EiB returns size in exbibytes.
func (size Size) EiB() float64 { return size.Float64() / EiB.Float64() } func (size Size) EiB() float64 { return size.Float64() / EiB.Float64() }
// KB returns size in kilobytes // KB returns size in kilobytes.
func (size Size) KB() float64 { return size.Float64() / KB.Float64() } func (size Size) KB() float64 { return size.Float64() / KB.Float64() }
// MB returns size in megabytes // MB returns size in megabytes.
func (size Size) MB() float64 { return size.Float64() / MB.Float64() } func (size Size) MB() float64 { return size.Float64() / MB.Float64() }
// GB returns size in gigabytes // GB returns size in gigabytes.
func (size Size) GB() float64 { return size.Float64() / GB.Float64() } func (size Size) GB() float64 { return size.Float64() / GB.Float64() }
// TB returns size in terabytes // TB returns size in terabytes.
func (size Size) TB() float64 { return size.Float64() / TB.Float64() } func (size Size) TB() float64 { return size.Float64() / TB.Float64() }
// PB returns size in petabytes // PB returns size in petabytes.
func (size Size) PB() float64 { return size.Float64() / PB.Float64() } func (size Size) PB() float64 { return size.Float64() / PB.Float64() }
// EB returns size in exabytes // EB returns size in exabytes.
func (size Size) EB() float64 { return size.Float64() / EB.Float64() } func (size Size) EB() float64 { return size.Float64() / EB.Float64() }
// String converts size to a string using base-2 prefixes, unless the number // String converts size to a string using base-2 prefixes, unless the number
@ -99,7 +99,7 @@ func countZeros(num, base int64) (count int) {
return count return count
} }
// Base2String converts size to a string using base-2 prefixes // Base2String converts size to a string using base-2 prefixes.
func (size Size) Base2String() string { func (size Size) Base2String() string {
if size == 0 { if size == 0 {
return "0 B" return "0 B"
@ -123,7 +123,7 @@ func (size Size) Base2String() string {
return strconv.FormatInt(size.Int64(), 10) + " B" return strconv.FormatInt(size.Int64(), 10) + " B"
} }
// Base10String converts size to a string using base-10 prefixes // Base10String converts size to a string using base-10 prefixes.
func (size Size) Base10String() string { func (size Size) Base10String() string {
if size == 0 { if size == 0 {
return "0 B" return "0 B"
@ -158,7 +158,7 @@ func isLetter(b byte) bool {
return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z') return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z')
} }
// Set updates value from string // Set updates value from string.
func (size *Size) Set(s string) error { func (size *Size) Set(s string) error {
if s == "" { if s == "" {
return errors.New("empty size") return errors.New("empty size")
@ -219,7 +219,7 @@ func (size *Size) Set(s string) error {
return nil return nil
} }
// Type implements pflag.Value // Type implements pflag.Value.
func (Size) Type() string { return "memory.Size" } func (Size) Type() string { return "memory.Size" }
// MarshalText returns size as a string. // MarshalText returns size as a string.

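Because Size implements flag.Value, the same type both parses human-readable strings and formats byte counts back out. A small usage sketch, assuming the vendored import path storj.io/common/memory and that Set accepts a plain string such as "10MiB":

package main

import (
	"fmt"

	"storj.io/common/memory"
)

func main() {
	var limit memory.Size
	if err := limit.Set("10MiB"); err != nil { // Set implements flag.Value
		panic(err)
	}
	fmt.Println(limit.Int64())      // raw byte count
	fmt.Println(limit.String())     // formatted with base-2 prefixes
	fmt.Println(memory.GiB.Int64()) // the base-2 constant: 1 << 30
}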

@ -5,13 +5,13 @@ package memory
import "strings" import "strings"
// Sizes implements flag.Value for collecting memory size // Sizes implements flag.Value for collecting memory size.
type Sizes struct { type Sizes struct {
Default []Size Default []Size
Custom []Size Custom []Size
} }
// Sizes returns the loaded values // Sizes returns the loaded values.
func (sizes Sizes) Sizes() []Size { func (sizes Sizes) Sizes() []Size {
if len(sizes.Custom) > 0 { if len(sizes.Custom) > 0 {
return sizes.Custom return sizes.Custom
@ -19,7 +19,7 @@ func (sizes Sizes) Sizes() []Size {
return sizes.Default return sizes.Default
} }
// String converts values to a string // String converts values to a string.
func (sizes Sizes) String() string { func (sizes Sizes) String() string {
sz := sizes.Sizes() sz := sizes.Sizes()
xs := make([]string, len(sz)) xs := make([]string, len(sz))
@ -29,7 +29,7 @@ func (sizes Sizes) String() string {
return strings.Join(xs, " ") return strings.Join(xs, " ")
} }
// Set adds values from byte values // Set adds values from byte values.
func (sizes *Sizes) Set(s string) error { func (sizes *Sizes) Set(s string) error {
for _, x := range strings.Fields(s) { for _, x := range strings.Fields(s) {
var size Size var size Size

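Sizes layers the same flag.Value idea over a whitespace-separated list: Set splits the input with strings.Fields and, presumably, appends each parsed Size to Custom, and Sizes() then prefers Custom over Default. A hedged sketch of that flow:

package main

import (
	"fmt"

	"storj.io/common/memory"
)

func main() {
	sizes := memory.Sizes{Default: []memory.Size{memory.MiB}}
	// Set parses each whitespace-separated field into the Custom slice.
	if err := sizes.Set("1MiB 16MiB 256MiB"); err != nil {
		panic(err)
	}
	fmt.Println(sizes.String()) // the Custom values, joined with spaces
}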

@ -3,12 +3,12 @@
package memory package memory
// FormatBytes converts number of bytes to appropriately sized string // FormatBytes converts number of bytes to appropriately sized string.
func FormatBytes(bytes int64) string { func FormatBytes(bytes int64) string {
return Size(bytes).String() return Size(bytes).String()
} }
// ParseString converts string to number of bytes // ParseString converts string to number of bytes.
func ParseString(s string) (int64, error) { func ParseString(s string) (int64, error) {
var size Size var size Size
err := size.Set(s) err := size.Set(s)

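FormatBytes and ParseString are thin wrappers over Size; a quick round trip (the value is illustrative):

package main

import (
	"fmt"

	"storj.io/common/memory"
)

func main() {
	n, err := memory.ParseString("25MiB") // 26214400 bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(memory.FormatBytes(n)) // formats back via Size.String
}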

@ -9,7 +9,7 @@ import (
) )
// closeTrackingConn wraps a net.Conn and keeps track of if it was closed // closeTrackingConn wraps a net.Conn and keeps track of if it was closed
// or if it was leaked (and closes it if it was leaked.) // or if it was leaked (and closes it if it was leaked).
type closeTrackingConn struct { type closeTrackingConn struct {
net.Conn net.Conn
} }
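The comment only states the goal; one common way to implement close-or-leak tracking is a finalizer that closes the underlying connection if the wrapper is garbage collected without Close ever running. The sketch below is speculative and is not necessarily how the vendored rpc package does it.

package main

import (
	"net"
	"runtime"
	"sync/atomic"
)

type closeTrackingConn struct {
	net.Conn
	closed int32
}

// trackClose wraps a conn and installs a finalizer that closes it if the
// wrapper is collected while still open (a leak).
func trackClose(c net.Conn) *closeTrackingConn {
	t := &closeTrackingConn{Conn: c}
	runtime.SetFinalizer(t, func(t *closeTrackingConn) {
		if atomic.LoadInt32(&t.closed) == 0 {
			_ = t.Conn.Close() // leaked: never closed explicitly
		}
	})
	return t
}

// Close marks the conn as properly closed and clears the finalizer.
func (t *closeTrackingConn) Close() error {
	atomic.StoreInt32(&t.closed, 1)
	runtime.SetFinalizer(t, nil)
	return t.Conn.Close()
}

func main() {
	client, server := net.Pipe()
	defer server.Close()

	tracked := trackClose(client)
	_ = tracked.Close() // explicit close, so the finalizer never fires
}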


@ -61,7 +61,7 @@ func (path Unencrypted) Iterator() Iterator {
return NewIterator(path.raw) return NewIterator(path.raw)
} }
// Less returns true if 'path' should be sorted earlier than 'other' // Less returns true if 'path' should be sorted earlier than 'other'.
func (path Unencrypted) Less(other Unencrypted) bool { func (path Unencrypted) Less(other Unencrypted) bool {
return path.raw < other.raw return path.raw < other.raw
} }
@ -104,7 +104,7 @@ func (path Encrypted) Iterator() Iterator {
return NewIterator(path.raw) return NewIterator(path.raw)
} }
// Less returns true if 'path' should be sorted earlier than 'other' // Less returns true if 'path' should be sorted earlier than 'other'.
func (path Encrypted) Less(other Encrypted) bool { func (path Encrypted) Less(other Encrypted) bool {
return path.raw < other.raw return path.raw < other.raw
} }


@ -23,7 +23,7 @@ var _ = time.Kitchen
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// InjuredSegment is the queue item used for the data repair queue // InjuredSegment is the queue item used for the data repair queue.
type InjuredSegment struct { type InjuredSegment struct {
Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
LostPieces []int32 `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"` LostPieces []int32 `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"`


@ -8,7 +8,7 @@ import "google/protobuf/timestamp.proto";
package repair; package repair;
// InjuredSegment is the queue item used for the data repair queue // InjuredSegment is the queue item used for the data repair queue.
message InjuredSegment { message InjuredSegment {
bytes path = 1; bytes path = 1;
repeated int32 lost_pieces = 2; repeated int32 lost_pieces = 2;


@ -146,7 +146,7 @@ func (m *InitiateGracefulExitRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_InitiateGracefulExitRequest proto.InternalMessageInfo var xxx_messageInfo_InitiateGracefulExitRequest proto.InternalMessageInfo
// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit // NonExitingSatellite contains information that's needed for a storagenode to start graceful exit.
type NonExitingSatellite struct { type NonExitingSatellite struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
DomainName string `protobuf:"bytes,2,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"` DomainName string `protobuf:"bytes,2,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"`


@ -11,7 +11,7 @@ import "orders.proto";
package gracefulexit; package gracefulexit;
// NodeGracefulExit is a private service on storagenodes // NodeGracefulExit is a private service on storagenodes.
service NodeGracefulExit { service NodeGracefulExit {
// GetSatellitesList returns a list of satellites that the storagenode has not exited. // GetSatellitesList returns a list of satellites that the storagenode has not exited.
rpc GetNonExitingSatellites(GetNonExitingSatellitesRequest) returns (GetNonExitingSatellitesResponse); rpc GetNonExitingSatellites(GetNonExitingSatellitesRequest) returns (GetNonExitingSatellitesResponse);
@ -27,7 +27,7 @@ message InitiateGracefulExitRequest {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
} }
// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit // NonExitingSatellite contains information that's needed for a storagenode to start graceful exit.
message NonExitingSatellite { message NonExitingSatellite {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
string domain_name = 2; string domain_name = 2;


@ -26,7 +26,6 @@ var _ = time.Kitchen
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ListSegments
type ListIrreparableSegmentsRequest struct { type ListIrreparableSegmentsRequest struct {
Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
LastSeenSegmentPath []byte `protobuf:"bytes,2,opt,name=last_seen_segment_path,json=lastSeenSegmentPath,proto3" json:"last_seen_segment_path,omitempty"` LastSeenSegmentPath []byte `protobuf:"bytes,2,opt,name=last_seen_segment_path,json=lastSeenSegmentPath,proto3" json:"last_seen_segment_path,omitempty"`
@ -181,7 +180,6 @@ func (m *ListIrreparableSegmentsResponse) GetSegments() []*IrreparableSegment {
return nil return nil
} }
// CountNodes
type CountNodesResponse struct { type CountNodesResponse struct {
Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`


@ -37,7 +37,6 @@ service HealthInspector {
rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {} rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {}
} }
// ListSegments
message ListIrreparableSegmentsRequest { message ListIrreparableSegmentsRequest {
int32 limit = 1; int32 limit = 1;
bytes last_seen_segment_path = 2; bytes last_seen_segment_path = 2;
@ -55,7 +54,6 @@ message ListIrreparableSegmentsResponse {
repeated IrreparableSegment segments = 1; repeated IrreparableSegment segments = 1;
} }
// CountNodes
message CountNodesResponse { message CountNodesResponse {
int64 count = 1; int64 count = 1;
} }


@ -21,7 +21,7 @@ var _ = math.Inf
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// SerializableMeta is the object metadata that will be stored serialized // SerializableMeta is the object metadata that will be stored serialized.
type SerializableMeta struct { type SerializableMeta struct {
ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
UserDefined map[string]string `protobuf:"bytes,2,rep,name=user_defined,json=userDefined,proto3" json:"user_defined,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` UserDefined map[string]string `protobuf:"bytes,2,rep,name=user_defined,json=userDefined,proto3" json:"user_defined,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`


@ -6,7 +6,7 @@ option go_package = "storj.io/common/pb";
package objects; package objects;
// SerializableMeta is the object metadata that will be stored serialized // SerializableMeta is the object metadata that will be stored serialized.
message SerializableMeta { message SerializableMeta {
string content_type = 1; string content_type = 1;
map<string, string> user_defined = 2; map<string, string> user_defined = 2;


@ -2581,7 +2581,6 @@ func (m *ObjectFinishDeleteResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_ObjectFinishDeleteResponse proto.InternalMessageInfo var xxx_messageInfo_ObjectFinishDeleteResponse proto.InternalMessageInfo
// only for satellite use
type SatStreamID struct { type SatStreamID struct {
Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`


@ -17,7 +17,7 @@ import "orders.proto";
// * If someone will add stream_id or segment_id to existing or new message in this protobuf // * If someone will add stream_id or segment_id to existing or new message in this protobuf
// then Batch method (satellite/metainfo/batch.go) needs to be updated as well // then Batch method (satellite/metainfo/batch.go) needs to be updated as well
// Metainfo it's a satellite RPC service // Metainfo it's a satellite RPC service.
service Metainfo { service Metainfo {
// Bucket // Bucket
rpc CreateBucket(BucketCreateRequest) returns (BucketCreateResponse); rpc CreateBucket(BucketCreateRequest) returns (BucketCreateResponse);
@ -392,7 +392,10 @@ message ObjectFinishDeleteRequest {
message ObjectFinishDeleteResponse { message ObjectFinishDeleteResponse {
} }
// only for satellite use //
// Only for satellite use
//
message SatStreamID { message SatStreamID {
bytes bucket = 1; bytes bucket = 1;
bytes encrypted_path = 2; bytes encrypted_path = 2;

vendor/storj.io/common/pb/node.pb.go generated vendored

@ -23,7 +23,7 @@ var _ = time.Kitchen
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// NodeType is an enum of possible node types // NodeType is an enum of possible node types.
type NodeType int32 type NodeType int32
const ( const (
@ -58,7 +58,7 @@ func (NodeType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{0} return fileDescriptor_0c843d59d2d938e7, []int{0}
} }
// NodeTransport is an enum of possible transports for the overlay network // NodeTransport is an enum of possible transports for the overlay network.
type NodeTransport int32 type NodeTransport int32
const ( const (
@ -81,9 +81,8 @@ func (NodeTransport) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{1} return fileDescriptor_0c843d59d2d938e7, []int{1}
} }
// TODO move statdb.Update() stuff out of here // Node represents a node in the overlay network.
// Node represents a node in the overlay network // Node is info for a updating a single storagenode, used in the Update rpc calls.
// Node is info for a updating a single storagenode, used in the Update rpc calls
type Node struct { type Node struct {
Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"` Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"`
Address *NodeAddress `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` Address *NodeAddress `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
@ -132,7 +131,7 @@ func (m *Node) GetDeprecatedLastIp() string {
return "" return ""
} }
// NodeAddress contains the information needed to communicate with a node on the network // NodeAddress contains the information needed to communicate with a node on the network.
type NodeAddress struct { type NodeAddress struct {
Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=node.NodeTransport" json:"transport,omitempty"` Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=node.NodeTransport" json:"transport,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
@ -179,7 +178,7 @@ func (m *NodeAddress) GetAddress() string {
return "" return ""
} }
// NodeOperator contains info about the storage node operator // NodeOperator contains info about the storage node operator.
type NodeOperator struct { type NodeOperator struct {
Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"`
Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"`
@ -226,7 +225,7 @@ func (m *NodeOperator) GetWallet() string {
return "" return ""
} }
// NodeCapacity contains all relevant data about a nodes ability to store data // NodeCapacity contains all relevant data about a nodes ability to store data.
type NodeCapacity struct { type NodeCapacity struct {
FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` // Deprecated: Do not use. FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` // Deprecated: Do not use.
FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"`
@ -274,7 +273,7 @@ func (m *NodeCapacity) GetFreeDisk() int64 {
return 0 return 0
} }
// Deprecated: use NodeOperator instead // Deprecated: use NodeOperator instead.
type NodeMetadata struct { type NodeMetadata struct {
Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"`
Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"`
@ -321,7 +320,7 @@ func (m *NodeMetadata) GetWallet() string {
return "" return ""
} }
// Deprecated: use NodeCapacity instead // Deprecated: use NodeCapacity instead.
type NodeRestrictions struct { type NodeRestrictions struct {
FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"`
FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"`
@ -368,7 +367,7 @@ func (m *NodeRestrictions) GetFreeDisk() int64 {
return 0 return 0
} }
// NodeVersion contains // NodeVersion contains version information about a node.
type NodeVersion struct { type NodeVersion struct {
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"`

vendor/storj.io/common/pb/node.proto generated vendored

@ -9,9 +9,8 @@ package node;
import "gogo.proto"; import "gogo.proto";
import "google/protobuf/timestamp.proto"; import "google/protobuf/timestamp.proto";
// TODO move statdb.Update() stuff out of here // Node represents a node in the overlay network.
// Node represents a node in the overlay network // Node is info for a updating a single storagenode, used in the Update rpc calls.
// Node is info for a updating a single storagenode, used in the Update rpc calls
message Node { message Node {
bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
NodeAddress address = 2; NodeAddress address = 2;
@ -20,7 +19,7 @@ message Node {
reserved "type", "restrictions", "reputation", "metadata", "latency_list", "audit_success", "is_up", "update_latency", "update_audit_success", "update_uptime", "version"; reserved "type", "restrictions", "reputation", "metadata", "latency_list", "audit_success", "is_up", "update_latency", "update_audit_success", "update_uptime", "version";
} }
// NodeType is an enum of possible node types // NodeType is an enum of possible node types.
enum NodeType { enum NodeType {
INVALID = 0; INVALID = 0;
SATELLITE = 1; SATELLITE = 1;
@ -29,42 +28,42 @@ enum NodeType {
BOOTSTRAP = 4 [deprecated=true]; BOOTSTRAP = 4 [deprecated=true];
} }
// NodeAddress contains the information needed to communicate with a node on the network // NodeAddress contains the information needed to communicate with a node on the network.
message NodeAddress { message NodeAddress {
NodeTransport transport = 1; NodeTransport transport = 1;
string address = 2; string address = 2;
} }
// NodeTransport is an enum of possible transports for the overlay network // NodeTransport is an enum of possible transports for the overlay network.
enum NodeTransport { enum NodeTransport {
TCP_TLS_GRPC = 0; TCP_TLS_GRPC = 0;
} }
// NodeOperator contains info about the storage node operator // NodeOperator contains info about the storage node operator.
message NodeOperator { message NodeOperator {
string email = 1; string email = 1;
string wallet = 2; string wallet = 2;
} }
// NodeCapacity contains all relevant data about a nodes ability to store data // NodeCapacity contains all relevant data about a nodes ability to store data.
message NodeCapacity { message NodeCapacity {
int64 free_bandwidth = 1 [deprecated=true]; int64 free_bandwidth = 1 [deprecated=true];
int64 free_disk = 2; int64 free_disk = 2;
} }
// Deprecated: use NodeOperator instead // Deprecated: use NodeOperator instead.
message NodeMetadata { message NodeMetadata {
string email = 1; string email = 1;
string wallet = 2; string wallet = 2;
} }
// Deprecated: use NodeCapacity instead // Deprecated: use NodeCapacity instead.
message NodeRestrictions { message NodeRestrictions {
int64 free_bandwidth = 1; int64 free_bandwidth = 1;
int64 free_disk = 2; int64 free_disk = 2;
} }
// NodeVersion contains // NodeVersion contains version information about a node.
message NodeVersion { message NodeVersion {
string version = 1; // must be semver formatted string version = 1; // must be semver formatted
string commit_hash = 2; string commit_hash = 2;


@ -32,6 +32,9 @@ type ReputationStats struct {
ReputationAlpha float64 `protobuf:"fixed64,3,opt,name=reputation_alpha,json=reputationAlpha,proto3" json:"reputation_alpha,omitempty"` ReputationAlpha float64 `protobuf:"fixed64,3,opt,name=reputation_alpha,json=reputationAlpha,proto3" json:"reputation_alpha,omitempty"`
ReputationBeta float64 `protobuf:"fixed64,4,opt,name=reputation_beta,json=reputationBeta,proto3" json:"reputation_beta,omitempty"` ReputationBeta float64 `protobuf:"fixed64,4,opt,name=reputation_beta,json=reputationBeta,proto3" json:"reputation_beta,omitempty"`
ReputationScore float64 `protobuf:"fixed64,5,opt,name=reputation_score,json=reputationScore,proto3" json:"reputation_score,omitempty"` ReputationScore float64 `protobuf:"fixed64,5,opt,name=reputation_score,json=reputationScore,proto3" json:"reputation_score,omitempty"`
UnknownReputationAlpha float64 `protobuf:"fixed64,6,opt,name=unknown_reputation_alpha,json=unknownReputationAlpha,proto3" json:"unknown_reputation_alpha,omitempty"`
UnknownReputationBeta float64 `protobuf:"fixed64,7,opt,name=unknown_reputation_beta,json=unknownReputationBeta,proto3" json:"unknown_reputation_beta,omitempty"`
UnknownReputationScore float64 `protobuf:"fixed64,8,opt,name=unknown_reputation_score,json=unknownReputationScore,proto3" json:"unknown_reputation_score,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -96,6 +99,27 @@ func (m *ReputationStats) GetReputationScore() float64 {
return 0 return 0
} }
func (m *ReputationStats) GetUnknownReputationAlpha() float64 {
if m != nil {
return m.UnknownReputationAlpha
}
return 0
}
func (m *ReputationStats) GetUnknownReputationBeta() float64 {
if m != nil {
return m.UnknownReputationBeta
}
return 0
}
func (m *ReputationStats) GetUnknownReputationScore() float64 {
if m != nil {
return m.UnknownReputationScore
}
return 0
}
type GetStatsRequest struct { type GetStatsRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -435,51 +459,53 @@ func init() {
func init() { proto.RegisterFile("nodestats.proto", fileDescriptor_e0b184ee117142aa) } func init() { proto.RegisterFile("nodestats.proto", fileDescriptor_e0b184ee117142aa) }
var fileDescriptor_e0b184ee117142aa = []byte{ var fileDescriptor_e0b184ee117142aa = []byte{
// 691 bytes of a gzipped FileDescriptorProto // 733 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
0x18, 0xbd, 0x76, 0xb8, 0x5c, 0xf2, 0x25, 0x10, 0x18, 0xc2, 0x55, 0x6e, 0xae, 0xd4, 0xa0, 0x50, 0x14, 0xfd, 0xec, 0xf4, 0x4b, 0x93, 0x9b, 0xb4, 0x69, 0xa7, 0x69, 0x09, 0x41, 0x22, 0x55, 0x8a,
0x89, 0x74, 0x93, 0xa8, 0x29, 0x8b, 0x4a, 0x55, 0x17, 0x04, 0xa4, 0x96, 0x45, 0x7f, 0xe4, 0xd0, 0xd4, 0xb0, 0x49, 0x44, 0xa8, 0x50, 0x25, 0xc4, 0xa2, 0x69, 0x25, 0xe8, 0x82, 0x1f, 0x39, 0x65,
0x4d, 0x17, 0xb5, 0x26, 0x9e, 0x0f, 0x33, 0x90, 0x78, 0x8c, 0x67, 0xdc, 0xaa, 0xcb, 0x6e, 0xbb, 0xc3, 0x02, 0x6b, 0xe2, 0x99, 0xba, 0xd3, 0x26, 0x1e, 0xd7, 0x33, 0xa6, 0x62, 0x09, 0x4b, 0x56,
0xea, 0x1b, 0xf4, 0x75, 0xfa, 0x04, 0x5d, 0x74, 0x01, 0x7d, 0x94, 0x6a, 0x66, 0x9c, 0x3f, 0x03, 0xbc, 0x01, 0xaf, 0xc3, 0x33, 0xb0, 0x68, 0x79, 0x14, 0x34, 0x33, 0xce, 0x9f, 0x93, 0xd2, 0xb0,
0x25, 0x5d, 0xe6, 0x7c, 0xe7, 0x9c, 0x8c, 0xcf, 0x99, 0x6f, 0xa0, 0x12, 0x09, 0x86, 0x52, 0x51, 0xf4, 0xb9, 0xe7, 0x1c, 0x5f, 0xdd, 0x73, 0xe7, 0x42, 0x29, 0xe0, 0x84, 0x0a, 0x89, 0xa5, 0x68,
0x25, 0xdb, 0x71, 0x22, 0x94, 0x20, 0xc5, 0x09, 0x50, 0x87, 0x50, 0x84, 0xc2, 0xc2, 0xf5, 0x46, 0x86, 0x11, 0x97, 0x1c, 0xe5, 0x47, 0x40, 0x15, 0x7c, 0xee, 0x73, 0x03, 0x57, 0x6b, 0x3e, 0xe7,
0x28, 0x44, 0x38, 0xc4, 0x8e, 0xf9, 0x35, 0x48, 0x4f, 0x3a, 0x8a, 0x8f, 0x34, 0x6d, 0x14, 0x5b, 0x7e, 0x9f, 0xb6, 0xf4, 0x57, 0x2f, 0x3e, 0x6d, 0x49, 0x36, 0x50, 0xb4, 0x41, 0x68, 0x08, 0xf5,
0x42, 0xf3, 0xbb, 0x03, 0x15, 0x0f, 0xe3, 0x54, 0x51, 0xc5, 0x45, 0xd4, 0xd7, 0x06, 0xa4, 0x01, 0xaf, 0x19, 0x28, 0x39, 0x34, 0x8c, 0x25, 0x96, 0x8c, 0x07, 0x5d, 0x65, 0x80, 0x6a, 0x50, 0x90,
0x25, 0x25, 0x14, 0x1d, 0xfa, 0x81, 0x48, 0x23, 0x55, 0x73, 0xb6, 0x9d, 0x56, 0xc1, 0x03, 0x03, 0x5c, 0xe2, 0xbe, 0xeb, 0xf1, 0x38, 0x90, 0x15, 0x6b, 0xdb, 0x6a, 0x64, 0x1c, 0xd0, 0xd0, 0xa1,
0x1d, 0x68, 0x84, 0xec, 0xc0, 0xaa, 0x4c, 0x83, 0x00, 0xa5, 0xcc, 0x28, 0xae, 0xa1, 0x94, 0x33, 0x42, 0xd0, 0x0e, 0xac, 0x88, 0xd8, 0xf3, 0xa8, 0x10, 0x09, 0xc5, 0xd6, 0x94, 0x62, 0x02, 0x1a,
0xd0, 0x92, 0x1e, 0xc0, 0x7a, 0x32, 0x31, 0xf6, 0xe9, 0x30, 0x3e, 0xa5, 0xb5, 0xc2, 0xb6, 0xd3, 0xd2, 0x63, 0x58, 0x8b, 0x46, 0xc6, 0x2e, 0xee, 0x87, 0x67, 0xb8, 0x92, 0xd9, 0xb6, 0x1a, 0x96,
0x72, 0xbc, 0xca, 0x14, 0xdf, 0xd7, 0x30, 0xd9, 0x85, 0x19, 0xc8, 0x1f, 0xa0, 0xa2, 0xb5, 0x25, 0x53, 0x1a, 0xe3, 0x07, 0x0a, 0x46, 0xbb, 0x30, 0x01, 0xb9, 0x3d, 0x2a, 0x71, 0x65, 0x49, 0x33,
0xc3, 0x5c, 0x9b, 0xc2, 0x3d, 0x54, 0x34, 0xe7, 0x29, 0x03, 0x91, 0x60, 0xed, 0xef, 0xbc, 0x67, 0x57, 0xc7, 0x70, 0x87, 0x4a, 0x9c, 0xf2, 0x14, 0x1e, 0x8f, 0x68, 0xe5, 0xff, 0xb4, 0x67, 0x57,
0x5f, 0xc3, 0xcd, 0x0d, 0xa8, 0x3c, 0x43, 0x65, 0x3e, 0xc8, 0xc3, 0x8b, 0x14, 0xa5, 0x6a, 0x5e, 0xc1, 0x68, 0x1f, 0x2a, 0x71, 0x70, 0x11, 0xf0, 0xab, 0xc0, 0x9d, 0x69, 0x23, 0xab, 0x25, 0x5b,
0xb9, 0xb0, 0x3e, 0xc5, 0x64, 0x2c, 0x22, 0x89, 0xe4, 0x29, 0x94, 0xd3, 0x58, 0xa7, 0xe2, 0x07, 0x49, 0xdd, 0x49, 0x75, 0xf3, 0x0c, 0xee, 0xcd, 0x51, 0xea, 0xae, 0x96, 0xb5, 0x70, 0x73, 0x46,
0xa7, 0x18, 0x9c, 0x9b, 0xaf, 0x2d, 0x75, 0xeb, 0xed, 0x69, 0xc0, 0xb9, 0x78, 0xbc, 0x92, 0xe5, 0xa8, 0x9b, 0x9b, 0xff, 0x47, 0xd3, 0x64, 0xee, 0x96, 0x3f, 0xea, 0x5e, 0xeb, 0xeb, 0x50, 0x7a,
0x1f, 0x68, 0x3a, 0x79, 0x02, 0x25, 0x9a, 0x32, 0xae, 0x32, 0xb5, 0x7b, 0xa7, 0x1a, 0x0c, 0xdd, 0x49, 0xa5, 0x1e, 0xbe, 0x43, 0x2f, 0x63, 0x2a, 0x64, 0xfd, 0xc6, 0x86, 0xb5, 0x31, 0x26, 0x42,
0x8a, 0x9f, 0x43, 0x99, 0x71, 0x79, 0x91, 0xd2, 0x21, 0x3f, 0xe1, 0xc8, 0x4c, 0x3c, 0x5a, 0x6d, 0x1e, 0x08, 0x8a, 0x5e, 0x40, 0x31, 0x0e, 0x55, 0x82, 0xae, 0x77, 0x46, 0xbd, 0x0b, 0x9d, 0x4c,
0x4b, 0x6b, 0x8f, 0x4b, 0x6b, 0x1f, 0x8f, 0x4b, 0xeb, 0xad, 0x7c, 0xbb, 0x6c, 0x38, 0x5f, 0xae, 0xa1, 0x5d, 0x6d, 0x8e, 0x97, 0x21, 0x15, 0xa5, 0x53, 0x30, 0xfc, 0x43, 0x45, 0x47, 0xcf, 0xa1,
0x1a, 0x8e, 0x37, 0xa7, 0x24, 0x3d, 0x28, 0xca, 0x54, 0xc6, 0x18, 0x31, 0x64, 0x26, 0xbb, 0x45, 0x80, 0x63, 0xc2, 0x64, 0xa2, 0xb6, 0xef, 0x54, 0x83, 0xa6, 0x1b, 0xf1, 0x2b, 0x28, 0x12, 0x26,
0x6d, 0xa6, 0x32, 0xb2, 0x0f, 0xc5, 0x33, 0xc1, 0x23, 0x64, 0x3e, 0x55, 0x26, 0xd5, 0xbb, 0x3d, 0x2e, 0x63, 0xdc, 0x67, 0xa7, 0x8c, 0x12, 0x1d, 0xa5, 0x52, 0x9b, 0x05, 0x6b, 0x0e, 0x17, 0xac,
0xfe, 0x32, 0x1e, 0x2b, 0x56, 0xb6, 0xaf, 0x9a, 0x9f, 0x1d, 0xa8, 0x1d, 0x52, 0x3e, 0xfc, 0xd8, 0x79, 0x32, 0x5c, 0xb0, 0x4e, 0xee, 0xe7, 0x75, 0xcd, 0xfa, 0x7e, 0x53, 0xb3, 0x9c, 0x29, 0x25,
0x57, 0x22, 0xa1, 0x21, 0xbe, 0x91, 0x34, 0xc4, 0x2c, 0x7e, 0xf2, 0x18, 0x96, 0x4e, 0x12, 0x31, 0xea, 0x40, 0x5e, 0xc4, 0x22, 0xa4, 0x01, 0xa1, 0x44, 0xe7, 0xbc, 0xa8, 0xcd, 0x58, 0x86, 0x0e,
0x9a, 0x24, 0xbc, 0x88, 0xb5, 0x51, 0x90, 0x3d, 0x70, 0x95, 0x98, 0x64, 0xbb, 0x88, 0xce, 0x55, 0x20, 0x7f, 0xce, 0x59, 0x40, 0x89, 0x8b, 0xa5, 0xde, 0x80, 0xbb, 0x3d, 0xfe, 0xd3, 0x1e, 0x39,
0xa2, 0xf9, 0xd5, 0x85, 0xff, 0x6e, 0x38, 0x4c, 0xd6, 0xfb, 0x2e, 0xfc, 0xa3, 0x4b, 0xf2, 0x39, 0x23, 0x3b, 0x90, 0xf5, 0x6f, 0x16, 0x54, 0x8e, 0x30, 0xeb, 0x7f, 0xee, 0x4a, 0x1e, 0x61, 0x9f,
0x33, 0x07, 0x2a, 0xf7, 0xd6, 0xb4, 0xf8, 0xc7, 0x65, 0x63, 0xf9, 0xa5, 0x60, 0x78, 0x74, 0xe8, 0xbe, 0x17, 0xd8, 0xa7, 0xc9, 0xf8, 0xd1, 0x3e, 0x2c, 0x9d, 0x46, 0x7c, 0x30, 0x9a, 0xf0, 0x22,
0x2d, 0xeb, 0xf1, 0x11, 0x23, 0x14, 0x36, 0x99, 0x76, 0xf1, 0xa5, 0xb5, 0xf1, 0x53, 0xed, 0x53, 0xd6, 0x5a, 0x81, 0xf6, 0xc0, 0x96, 0x7c, 0x34, 0xdb, 0x45, 0x74, 0xb6, 0xe4, 0xf5, 0x1f, 0x36,
0x73, 0xb7, 0x0b, 0xad, 0x52, 0xf7, 0xe1, 0x4c, 0xd3, 0xb7, 0xfe, 0x57, 0x7b, 0x0e, 0xdc, 0x60, 0xdc, 0x9f, 0xd3, 0x4c, 0x92, 0xfb, 0x2e, 0x2c, 0xab, 0x90, 0x5c, 0x46, 0x74, 0x43, 0xc5, 0xce,
0x79, 0x5e, 0xfd, 0x3d, 0x94, 0x67, 0x7f, 0x93, 0x26, 0xac, 0x52, 0xe5, 0x27, 0x28, 0x95, 0x6f, 0xaa, 0x12, 0xff, 0xba, 0xae, 0x65, 0xdf, 0x70, 0x42, 0x8f, 0x8f, 0x9c, 0xac, 0x2a, 0x1f, 0x13,
0xb6, 0xce, 0x9c, 0xd0, 0xf1, 0x4a, 0x54, 0x79, 0x28, 0xd5, 0xb1, 0x86, 0x74, 0xe3, 0x93, 0x5d, 0x84, 0x61, 0x83, 0x28, 0x17, 0x57, 0x18, 0x1b, 0x37, 0x56, 0x3e, 0x15, 0x7b, 0x3b, 0xd3, 0x28,
0xfe, 0xa3, 0x68, 0xa6, 0xb2, 0xe6, 0x16, 0x6c, 0xbe, 0x4e, 0x78, 0xc0, 0xa3, 0xf0, 0x85, 0x60, 0xb4, 0x9f, 0x4c, 0x24, 0x7d, 0xeb, 0xbf, 0x9a, 0x53, 0xe0, 0x3a, 0x49, 0xf3, 0xaa, 0x9f, 0xa0,
0x38, 0x1c, 0xef, 0xc9, 0x4f, 0x07, 0xaa, 0xf3, 0x78, 0x96, 0xd9, 0x1e, 0xfc, 0x8b, 0x61, 0xa2, 0x38, 0xf9, 0x8d, 0xea, 0xb0, 0x82, 0xa5, 0x1b, 0x51, 0x21, 0x5d, 0x7d, 0x21, 0x74, 0x87, 0x96,
0xd7, 0x7e, 0x40, 0x23, 0xf6, 0x81, 0x33, 0x75, 0xea, 0xc7, 0x09, 0x0f, 0x30, 0x7b, 0x23, 0xaa, 0x53, 0xc0, 0xd2, 0xa1, 0x42, 0x9e, 0x28, 0x48, 0x25, 0x3e, 0xba, 0x3b, 0xff, 0x34, 0x9a, 0xb1,
0x76, 0xda, 0x1b, 0x0f, 0xb5, 0x89, 0x51, 0x25, 0x18, 0x53, 0x9e, 0x5c, 0x53, 0xd9, 0x67, 0xa3, 0xac, 0xbe, 0x09, 0x1b, 0xef, 0x22, 0xe6, 0xb1, 0xc0, 0x7f, 0xcd, 0x09, 0xed, 0x0f, 0xdf, 0xc9,
0x6a, 0xa7, 0x39, 0x55, 0x0b, 0xd6, 0x19, 0x97, 0xe7, 0xbe, 0x8c, 0x69, 0x80, 0x19, 0xbf, 0x60, 0x6f, 0x0b, 0xca, 0xd3, 0x78, 0x32, 0xb3, 0x3d, 0xd8, 0xa2, 0x7e, 0xa4, 0x4e, 0x54, 0x0f, 0x07,
0xf8, 0x6b, 0x1a, 0xef, 0x6b, 0xd8, 0x32, 0xbb, 0xb0, 0x65, 0x57, 0x30, 0x6f, 0xbf, 0x64, 0xe8, 0xe4, 0x8a, 0x11, 0x79, 0xe6, 0x86, 0x11, 0xf3, 0x68, 0x72, 0xcf, 0xca, 0xa6, 0xda, 0x19, 0x16,
0x9b, 0x66, 0x38, 0xef, 0xde, 0xfd, 0xe4, 0x42, 0x51, 0xf7, 0x6c, 0x1f, 0xbc, 0x03, 0x58, 0x19, 0x95, 0x89, 0x56, 0x45, 0x34, 0xc4, 0x2c, 0x9a, 0x51, 0x99, 0x13, 0x57, 0x36, 0xd5, 0x94, 0xaa,
0xbf, 0x0b, 0x64, 0x76, 0x77, 0x73, 0x0f, 0x48, 0xfd, 0xff, 0x1b, 0x67, 0x59, 0x38, 0xef, 0x60, 0x01, 0x6b, 0x84, 0x89, 0x0b, 0x57, 0x84, 0xd8, 0xa3, 0x09, 0x3f, 0xa3, 0xf9, 0xab, 0x0a, 0xef,
0xe3, 0xda, 0x0d, 0x20, 0x3b, 0xbf, 0xbf, 0x1f, 0xd6, 0xf6, 0xfe, 0x22, 0x97, 0x88, 0xbc, 0x82, 0x2a, 0xd8, 0x30, 0xdb, 0xb0, 0x69, 0x9e, 0x60, 0xda, 0x7e, 0x49, 0xd3, 0x37, 0x74, 0x71, 0xda,
0xf2, 0x6c, 0x29, 0xe4, 0xde, 0x8c, 0xea, 0x86, 0x16, 0xeb, 0x8d, 0x5b, 0xe7, 0xd6, 0xb0, 0x57, 0xbd, 0xfd, 0xc5, 0x86, 0xbc, 0xca, 0xd9, 0x1c, 0xe7, 0x43, 0xc8, 0x0d, 0xef, 0x02, 0x9a, 0x7c,
0x7d, 0x4b, 0xf4, 0x95, 0x3e, 0x6b, 0x73, 0xd1, 0x09, 0xc4, 0x68, 0x24, 0xa2, 0x4e, 0x3c, 0x18, 0xbb, 0xa9, 0x03, 0x52, 0x7d, 0x30, 0xb7, 0x96, 0x0c, 0xe7, 0x23, 0xac, 0xcf, 0x6c, 0x00, 0xda,
0x2c, 0x9b, 0xcb, 0xf3, 0xe8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x8d, 0x3b, 0x4a, 0x62, 0xf9, 0xfb, 0x7e, 0x18, 0xdb, 0x47, 0x8b, 0x2c, 0x11, 0x7a, 0x0b, 0xc5, 0xc9, 0x50, 0xd0, 0xc3,
0x06, 0x00, 0x00, 0x09, 0xd5, 0x9c, 0x14, 0xab, 0xb5, 0x5b, 0xeb, 0xc6, 0xb0, 0x53, 0xfe, 0x80, 0xd4, 0x4a, 0x9f,
0x37, 0x19, 0x6f, 0x79, 0x7c, 0x30, 0xe0, 0x41, 0x2b, 0xec, 0xf5, 0xb2, 0x7a, 0x79, 0x9e, 0xfe,
0x09, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xa2, 0xc8, 0x62, 0x0e, 0x07, 0x00, 0x00,
} }
// --- DRPC BEGIN --- // --- DRPC BEGIN ---


@ -21,6 +21,9 @@ message ReputationStats {
double reputation_alpha = 3; double reputation_alpha = 3;
double reputation_beta = 4; double reputation_beta = 4;
double reputation_score = 5; double reputation_score = 5;
double unknown_reputation_alpha = 6;
double unknown_reputation_beta = 7;
double unknown_reputation_score = 8;
} }
message GetStatsRequest {} message GetStatsRequest {}
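The new unknown_reputation_alpha/beta/score fields add a second alpha/beta/score triple alongside the existing reputation values, and they surface in the generated Go type with the usual getters. Reading them looks like this (import path assumed to be storj.io/common/pb, values illustrative):

package main

import (
	"fmt"

	"storj.io/common/pb"
)

func main() {
	stats := &pb.ReputationStats{
		ReputationAlpha:        19.5,
		ReputationBeta:         0.5,
		ReputationScore:        0.975,
		UnknownReputationAlpha: 10, // new in this revision
		UnknownReputationBeta:  0,  // new
		UnknownReputationScore: 1,  // new
	}
	fmt.Println(stats.GetReputationScore(), stats.GetUnknownReputationScore())
}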


@ -26,7 +26,7 @@ var _ = time.Kitchen
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// PieceAction is an enumeration of all possible executed actions on storage node // PieceAction is an enumeration of all possible executed actions on storage node.
type PieceAction int32 type PieceAction int32
const ( const (
@ -98,7 +98,7 @@ func (SettlementResponse_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{7, 0} return fileDescriptor_e0f5d4cf0fc9e41b, []int{7, 0}
} }
// OrderLimit is provided by satellite to execute specific action on storage node within some limits // OrderLimit is provided by satellite to execute specific action on storage node within some limits.
type OrderLimit struct { type OrderLimit struct {
// unique serial to avoid replay attacks // unique serial to avoid replay attacks
SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"`
@ -200,7 +200,7 @@ func (m *OrderLimit) GetSatelliteAddress() *NodeAddress {
return nil return nil
} }
// OrderLimitSigning provides OrderLimit signing serialization // OrderLimitSigning provides OrderLimit signing serialization.
// //
// It is never used for sending across the network, it is // It is never used for sending across the network, it is
// used in signing to ensure that nullable=false fields get handled properly. // used in signing to ensure that nullable=false fields get handled properly.
@ -306,7 +306,7 @@ func (m *OrderLimitSigning) GetSatelliteAddress() *NodeAddress {
return nil return nil
} }
// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber // Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber.
type Order struct { type Order struct {
// serial of the order limit that was signed // serial of the order limit that was signed
SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"`
@ -357,7 +357,7 @@ func (m *Order) GetUplinkSignature() []byte {
return nil return nil
} }
// OrderSigning provides Order signing format // OrderSigning provides Order signing format.
// //
// It is never used for sending across the network, it is // It is never used for sending across the network, it is
// used in signing to ensure that nullable=false fields get handled properly. // used in signing to ensure that nullable=false fields get handled properly.


@ -10,7 +10,7 @@ import "gogo.proto";
import "google/protobuf/timestamp.proto"; import "google/protobuf/timestamp.proto";
import "node.proto"; import "node.proto";
// PieceAction is an enumeration of all possible executed actions on storage node // PieceAction is an enumeration of all possible executed actions on storage node.
enum PieceAction { enum PieceAction {
INVALID = 0; INVALID = 0;
PUT = 1; PUT = 1;
@ -22,7 +22,7 @@ enum PieceAction {
PUT_GRACEFUL_EXIT = 7; PUT_GRACEFUL_EXIT = 7;
} }
// OrderLimit is provided by satellite to execute specific action on storage node within some limits // OrderLimit is provided by satellite to execute specific action on storage node within some limits.
message OrderLimit { message OrderLimit {
// unique serial to avoid replay attacks // unique serial to avoid replay attacks
bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false];
@ -51,7 +51,7 @@ message OrderLimit {
node.NodeAddress satellite_address = 11; node.NodeAddress satellite_address = 11;
} }
// OrderLimitSigning provides OrderLimit signing serialization // OrderLimitSigning provides OrderLimit signing serialization.
// //
// It is never used for sending across the network, it is // It is never used for sending across the network, it is
// used in signing to ensure that nullable=false fields get handled properly. // used in signing to ensure that nullable=false fields get handled properly.
@ -85,7 +85,7 @@ message OrderLimitSigning {
node.NodeAddress satellite_address = 11; node.NodeAddress satellite_address = 11;
} }
// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber // Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber.
message Order { message Order {
// serial of the order limit that was signed // serial of the order limit that was signed
bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false];
@ -95,7 +95,7 @@ message Order {
bytes uplink_signature = 3; bytes uplink_signature = 3;
} }
// OrderSigning provides Order signing format // OrderSigning provides Order signing format.
// //
// It is never used for sending across the network, it is // It is never used for sending across the network, it is
// used in signing to ensure that nullable=false fields get handled properly. // used in signing to ensure that nullable=false fields get handled properly.


@ -95,6 +95,7 @@ func (m *PrepareInvoiceRecordsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_PrepareInvoiceRecordsResponse proto.InternalMessageInfo var xxx_messageInfo_PrepareInvoiceRecordsResponse proto.InternalMessageInfo
type ApplyInvoiceRecordsRequest struct { type ApplyInvoiceRecordsRequest struct {
Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -124,6 +125,13 @@ func (m *ApplyInvoiceRecordsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceRecordsRequest proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceRecordsRequest proto.InternalMessageInfo
func (m *ApplyInvoiceRecordsRequest) GetPeriod() time.Time {
if m != nil {
return m.Period
}
return time.Time{}
}
type ApplyInvoiceRecordsResponse struct { type ApplyInvoiceRecordsResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -155,6 +163,7 @@ func (m *ApplyInvoiceRecordsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceRecordsResponse proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceRecordsResponse proto.InternalMessageInfo
type ApplyInvoiceCouponsRequest struct { type ApplyInvoiceCouponsRequest struct {
Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -184,6 +193,13 @@ func (m *ApplyInvoiceCouponsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceCouponsRequest proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceCouponsRequest proto.InternalMessageInfo
func (m *ApplyInvoiceCouponsRequest) GetPeriod() time.Time {
if m != nil {
return m.Period
}
return time.Time{}
}
type ApplyInvoiceCouponsResponse struct { type ApplyInvoiceCouponsResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -215,6 +231,7 @@ func (m *ApplyInvoiceCouponsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceCouponsResponse proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceCouponsResponse proto.InternalMessageInfo
type ApplyInvoiceCreditsRequest struct { type ApplyInvoiceCreditsRequest struct {
Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -244,6 +261,13 @@ func (m *ApplyInvoiceCreditsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceCreditsRequest proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceCreditsRequest proto.InternalMessageInfo
func (m *ApplyInvoiceCreditsRequest) GetPeriod() time.Time {
if m != nil {
return m.Period
}
return time.Time{}
}
type ApplyInvoiceCreditsResponse struct { type ApplyInvoiceCreditsResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -275,6 +299,7 @@ func (m *ApplyInvoiceCreditsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_ApplyInvoiceCreditsResponse proto.InternalMessageInfo var xxx_messageInfo_ApplyInvoiceCreditsResponse proto.InternalMessageInfo
type CreateInvoicesRequest struct { type CreateInvoicesRequest struct {
Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -304,6 +329,13 @@ func (m *CreateInvoicesRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_CreateInvoicesRequest proto.InternalMessageInfo var xxx_messageInfo_CreateInvoicesRequest proto.InternalMessageInfo
func (m *CreateInvoicesRequest) GetPeriod() time.Time {
if m != nil {
return m.Period
}
return time.Time{}
}
type CreateInvoicesResponse struct { type CreateInvoicesResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -350,29 +382,29 @@ func init() {
func init() { proto.RegisterFile("payments.proto", fileDescriptor_a9566e6e864d2854) } func init() { proto.RegisterFile("payments.proto", fileDescriptor_a9566e6e864d2854) }
var fileDescriptor_a9566e6e864d2854 = []byte{ var fileDescriptor_a9566e6e864d2854 = []byte{
// 341 bytes of a gzipped FileDescriptorProto // 342 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x4e, 0xfa, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0x41, 0x4e, 0xc2, 0x40,
0x10, 0xc7, 0x7f, 0xe4, 0x97, 0x10, 0x5c, 0x13, 0x0e, 0xab, 0x28, 0x59, 0x21, 0xc5, 0x26, 0x2a, 0x14, 0x86, 0x25, 0x26, 0x04, 0x9f, 0x09, 0x8b, 0x51, 0x0c, 0xa9, 0x92, 0x22, 0x89, 0xca, 0x6a,
0xa7, 0x6d, 0x82, 0x57, 0x2f, 0xc2, 0xc9, 0x1b, 0x21, 0x7a, 0x31, 0x5e, 0x0a, 0x1d, 0x9b, 0x12, 0x9a, 0xe0, 0xd6, 0x8d, 0xb0, 0x72, 0x47, 0x88, 0x6c, 0x88, 0x9b, 0x42, 0x9f, 0x4d, 0x09, 0xed,
0xda, 0x59, 0x77, 0x17, 0x13, 0xde, 0xc2, 0xc7, 0xf2, 0x29, 0xf4, 0x51, 0x34, 0xb0, 0x6d, 0x43, 0x1b, 0x67, 0x06, 0x13, 0x6e, 0xe1, 0xb1, 0x3c, 0x85, 0x1e, 0x45, 0x03, 0xd3, 0x12, 0x8a, 0x1d,
0x71, 0x17, 0x8e, 0x9d, 0xf9, 0xfe, 0x49, 0xe7, 0x93, 0x25, 0x4d, 0x11, 0xae, 0x52, 0xc8, 0xb4, 0xd8, 0xc8, 0xb2, 0xcd, 0x9b, 0xef, 0x5b, 0xfc, 0x1f, 0x54, 0x85, 0xbf, 0x88, 0x31, 0xd1, 0x8a,
0xe2, 0x42, 0xa2, 0x46, 0x7a, 0x94, 0x61, 0x04, 0x4a, 0x87, 0x5a, 0x31, 0x12, 0x63, 0x8c, 0x66, 0x0b, 0x49, 0x9a, 0xd8, 0x49, 0x42, 0x01, 0x2a, 0xed, 0x6b, 0xe5, 0x40, 0x48, 0x21, 0x99, 0xdf,
0xcc, 0xbc, 0x18, 0x31, 0x5e, 0x40, 0xb0, 0xf9, 0x9a, 0x2e, 0x5f, 0x03, 0x9d, 0xa4, 0x6b, 0x59, 0x8e, 0x1b, 0x12, 0x85, 0x33, 0xf4, 0x56, 0x5f, 0xe3, 0xf9, 0xab, 0xa7, 0xa3, 0x78, 0x79, 0x16,
0x2a, 0x8c, 0xc0, 0x7f, 0x21, 0x9d, 0xb1, 0x04, 0x11, 0x4a, 0x78, 0xc8, 0xde, 0x31, 0x99, 0xc1, 0x0b, 0x73, 0xd0, 0x7a, 0x81, 0xab, 0xbe, 0x44, 0xe1, 0x4b, 0x7c, 0x4a, 0xde, 0x29, 0x9a, 0xe0,
0x04, 0x66, 0x28, 0x23, 0x35, 0x81, 0xb7, 0x25, 0x28, 0x4d, 0xef, 0x48, 0x5d, 0x80, 0x4c, 0x30, 0x00, 0x27, 0x24, 0x03, 0x35, 0xc0, 0xb7, 0x39, 0x2a, 0xcd, 0x1e, 0xa0, 0x2c, 0x50, 0x46, 0x14,
0x6a, 0xd7, 0x7a, 0xb5, 0xfe, 0xf1, 0x80, 0x71, 0x93, 0xc8, 0x8b, 0x44, 0xfe, 0x58, 0x24, 0x0e, 0xd4, 0x4b, 0xcd, 0x52, 0xfb, 0xb4, 0xe3, 0x70, 0x43, 0xe4, 0x19, 0x91, 0x3f, 0x67, 0xc4, 0x6e,
0x1b, 0x9f, 0x5f, 0xde, 0xbf, 0x8f, 0x6f, 0xaf, 0x36, 0xc9, 0x3d, 0xbe, 0x47, 0xba, 0x8e, 0x74, 0xe5, 0xf3, 0xcb, 0x3d, 0xfa, 0xf8, 0x76, 0x4b, 0x83, 0xf4, 0x4d, 0xcb, 0x85, 0x86, 0x85, 0xae,
0x25, 0x30, 0x53, 0xe0, 0x77, 0x08, 0xbb, 0x17, 0x62, 0xb1, 0xb2, 0x96, 0xfb, 0x5d, 0x72, 0x61, 0x04, 0x25, 0x0a, 0x5b, 0x23, 0x70, 0x1e, 0x85, 0x98, 0x2d, 0x0e, 0x21, 0x6f, 0xc0, 0x65, 0x21,
0xdd, 0xda, 0xcd, 0x23, 0x5c, 0xae, 0xe7, 0x0e, 0x73, 0xb9, 0x75, 0x98, 0x25, 0x44, 0x89, 0x76, 0xbb, 0x58, 0xdd, 0xa3, 0xf9, 0xf2, 0xff, 0x41, 0xd4, 0x6b, 0xb6, 0x45, 0x2d, 0x31, 0x88, 0xf4,
0x9a, 0x8b, 0x6d, 0x6e, 0x3e, 0x27, 0xad, 0x91, 0x84, 0x50, 0x17, 0xbf, 0x55, 0xfa, 0xda, 0xe4, 0x81, 0xd4, 0x19, 0x3b, 0x55, 0x0f, 0xa1, 0xd6, 0x93, 0xe8, 0xeb, 0x6c, 0x90, 0x7f, 0xb2, 0xd6,
0x6c, 0x77, 0x61, 0x2c, 0x83, 0x9f, 0xff, 0xa4, 0x31, 0xce, 0x99, 0xd1, 0x39, 0x69, 0x59, 0xef, 0xe1, 0x62, 0x1b, 0x6b, 0x84, 0x9d, 0x9f, 0x63, 0xa8, 0xf4, 0xd3, 0x56, 0xd9, 0x14, 0x6a, 0x85,
0x42, 0x6f, 0x78, 0xc9, 0x91, 0xef, 0xe3, 0xc2, 0xfa, 0x87, 0x85, 0xa6, 0x98, 0x46, 0xe4, 0xc4, 0x3d, 0xb0, 0x3b, 0xbe, 0xee, 0x97, 0xef, 0xea, 0xd1, 0x69, 0xef, 0x3f, 0x34, 0x62, 0x16, 0xc0,
0x72, 0x44, 0x7a, 0xb5, 0x15, 0xe0, 0x46, 0xc0, 0xae, 0x0f, 0xc9, 0xec, 0x2d, 0xf9, 0xb5, 0x9d, 0x59, 0xc1, 0xfc, 0xec, 0x66, 0x03, 0x60, 0x4f, 0xcf, 0xb9, 0xdd, 0x77, 0x56, 0x6c, 0x49, 0x97,
0x2d, 0x55, 0x56, 0xce, 0x96, 0x1d, 0x68, 0x7f, 0x5a, 0x0c, 0x16, 0x77, 0x4b, 0x05, 0xaa, 0xbb, 0xb6, 0x5a, 0xf2, 0x95, 0x59, 0x2d, 0x5b, 0xc1, 0xfc, 0xb1, 0x98, 0x51, 0xed, 0x96, 0x5c, 0x50,
0xa5, 0x4a, 0x97, 0x3e, 0x91, 0x66, 0x15, 0x22, 0xed, 0x6d, 0x39, 0xad, 0xe0, 0xd9, 0xe5, 0x1e, 0x76, 0x4b, 0xbe, 0x0d, 0x36, 0x84, 0x6a, 0x7e, 0x44, 0xd6, 0xdc, 0x78, 0x59, 0x98, 0x8d, 0x73,
0x85, 0x89, 0x1d, 0x9e, 0x3e, 0x53, 0xa5, 0x51, 0xce, 0x79, 0x82, 0xc1, 0x0c, 0xd3, 0x14, 0xb3, 0xbd, 0xe3, 0xc2, 0x60, 0xbb, 0xe7, 0x23, 0xa6, 0x34, 0xc9, 0x29, 0x8f, 0xc8, 0x9b, 0x50, 0x1c,
0x40, 0x4c, 0xa7, 0xf5, 0xcd, 0x43, 0xba, 0xfd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x32, 0x6d, 0x84, 0x53, 0xe2, 0x89, 0xf1, 0xb8, 0xbc, 0xea, 0xea, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xde,
0xad, 0xd1, 0x03, 0x00, 0x00, 0x0b, 0xc5, 0xc9, 0x04, 0x00, 0x00,
} }
// --- DRPC BEGIN --- // --- DRPC BEGIN ---

View file

@ -23,14 +23,26 @@ message PrepareInvoiceRecordsRequest {
message PrepareInvoiceRecordsResponse {} message PrepareInvoiceRecordsResponse {}
message ApplyInvoiceRecordsRequest {} message ApplyInvoiceRecordsRequest {
google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message ApplyInvoiceRecordsResponse {} message ApplyInvoiceRecordsResponse {}
message ApplyInvoiceCouponsRequest {} message ApplyInvoiceCouponsRequest {
google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message ApplyInvoiceCouponsResponse {} message ApplyInvoiceCouponsResponse {}
message ApplyInvoiceCreditsRequest {} message ApplyInvoiceCreditsRequest {
google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message ApplyInvoiceCreditsResponse {} message ApplyInvoiceCreditsResponse {}
message CreateInvoicesRequest {} message CreateInvoicesRequest {
google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message CreateInvoicesResponse {} message CreateInvoicesResponse {}
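Each of these request messages gains a period timestamp annotated with (gogoproto.stdtime) = true and (gogoproto.nullable) = false, which gogo/protobuf maps to a plain (non-pointer) time.Time field in the generated Go struct. The sketch below uses a hypothetical local struct that mirrors that shape; the real generated type lives in the satellite's payments package and is not vendored here:

package main

import (
    "fmt"
    "time"
)

// applyInvoiceRecordsRequest is an illustrative stand-in for the generated
// message: stdtime plus non-nullable yields a plain time.Time field.
type applyInvoiceRecordsRequest struct {
    Period time.Time
}

func main() {
    // A billing period is typically the first instant of a month in UTC.
    req := applyInvoiceRecordsRequest{
        Period: time.Date(2020, time.April, 1, 0, 0, 0, 0, time.UTC),
    }
    fmt.Println("applying invoice records for period", req.Period.Format("2006-01"))
}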

View file

@ -58,7 +58,6 @@ func (PieceHeader_FormatVersion) EnumDescriptor() ([]byte, []int) {
// Chunk -> // Chunk ->
// PieceHash signed by uplink -> // PieceHash signed by uplink ->
// <- PieceHash signed by storage node // <- PieceHash signed by storage node
//
type PieceUploadRequest struct { type PieceUploadRequest struct {
// first message to show that we are allowed to upload // first message to show that we are allowed to upload
Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`

View file

@ -29,7 +29,6 @@ service Piecestore {
// Chunk -> // Chunk ->
// PieceHash signed by uplink -> // PieceHash signed by uplink ->
// <- PieceHash signed by storage node // <- PieceHash signed by storage node
//
message PieceUploadRequest { message PieceUploadRequest {
// first message to show that we are allowed to upload // first message to show that we are allowed to upload
orders.OrderLimit limit = 1; orders.OrderLimit limit = 1;

View file

@ -364,7 +364,6 @@ func (m *Pointer) GetPieceHashesVerified() bool {
return false return false
} }
// ListResponse is a response message for the List rpc call
type ListResponse struct { type ListResponse struct {
Items []*ListResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` Items []*ListResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"`

View file

@ -61,7 +61,6 @@ message Pointer {
bool piece_hashes_verified = 11; bool piece_hashes_verified = 11;
} }
// ListResponse is a response message for the List rpc call
message ListResponse { message ListResponse {
message Item { message Item {
string path = 1; string path = 1;

View file

@ -5,7 +5,7 @@ import "gogo.proto";
package referralmanager; package referralmanager;
// ReferralManager is a service for handling referrals // ReferralManager is a service for handling referrals.
service ReferralManager { service ReferralManager {
// GetTokens retrieves a list of unredeemed tokens for a user // GetTokens retrieves a list of unredeemed tokens for a user
rpc GetTokens(GetTokensRequest) returns (GetTokensResponse); rpc GetTokens(GetTokensRequest) returns (GetTokensResponse);

20
vendor/storj.io/common/pb/types.go generated vendored
View file

@ -5,32 +5,32 @@ package pb
import "storj.io/common/storj" import "storj.io/common/storj"
// Path represents a object path // Path represents a object path.
type Path = storj.Path type Path = storj.Path
// NodeID is an alias to storj.NodeID for use in generated protobuf code // NodeID is an alias to storj.NodeID for use in generated protobuf code.
type NodeID = storj.NodeID type NodeID = storj.NodeID
// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code // NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code.
type NodeIDList = storj.NodeIDList type NodeIDList = storj.NodeIDList
// PieceID is an alias to storj.PieceID for use in generated protobuf code // PieceID is an alias to storj.PieceID for use in generated protobuf code.
type PieceID = storj.PieceID type PieceID = storj.PieceID
// PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code // PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code.
type PiecePublicKey = storj.PiecePublicKey type PiecePublicKey = storj.PiecePublicKey
// PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code // PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code.
type PiecePrivateKey = storj.PiecePrivateKey type PiecePrivateKey = storj.PiecePrivateKey
// SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code // SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code.
type SerialNumber = storj.SerialNumber type SerialNumber = storj.SerialNumber
// StreamID is an alias to storj.StreamID for use in generated protobuf code // StreamID is an alias to storj.StreamID for use in generated protobuf code.
type StreamID = storj.StreamID type StreamID = storj.StreamID
// Nonce is an alias to storj.Nonce for use in generated protobuf code // Nonce is an alias to storj.Nonce for use in generated protobuf code.
type Nonce = storj.Nonce type Nonce = storj.Nonce
// SegmentID is an alias to storj.SegmentID for use in generated protobuf code // SegmentID is an alias to storj.SegmentID for use in generated protobuf code.
type SegmentID = storj.SegmentID type SegmentID = storj.SegmentID
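These are type aliases rather than new defined types, so values pass between storj.io/common/pb and storj.io/common/storj without any conversion. A minimal sketch:

package main

import (
    "fmt"

    "storj.io/common/pb"
    "storj.io/common/storj"
)

func main() {
    // pb.NodeID is an alias of storj.NodeID: the same value satisfies both.
    var id storj.NodeID
    var pbID pb.NodeID = id // no conversion needed

    fmt.Println(pbID == id) // true: both are the zero NodeID
}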

8
vendor/storj.io/common/pb/utils.go generated vendored
View file

@ -12,7 +12,7 @@ import (
"storj.io/common/storj" "storj.io/common/storj"
) )
// Equal compares two Protobuf messages via serialization // Equal compares two Protobuf messages via serialization.
func Equal(msg1, msg2 proto.Message) bool { func Equal(msg1, msg2 proto.Message) bool {
//reflect.DeepEqual and proto.Equal don't seem work in all cases //reflect.DeepEqual and proto.Equal don't seem work in all cases
//todo: see how slow this is compared to custom equality checks //todo: see how slow this is compared to custom equality checks
@ -33,7 +33,7 @@ func Equal(msg1, msg2 proto.Message) bool {
return bytes.Equal(msg1Bytes, msg2Bytes) return bytes.Equal(msg1Bytes, msg2Bytes)
} }
// NodesToIDs extracts Node-s into a list of ids // NodesToIDs extracts Node-s into a list of ids.
func NodesToIDs(nodes []*Node) storj.NodeIDList { func NodesToIDs(nodes []*Node) storj.NodeIDList {
ids := make(storj.NodeIDList, len(nodes)) ids := make(storj.NodeIDList, len(nodes))
for i, node := range nodes { for i, node := range nodes {
@ -47,7 +47,7 @@ func NodesToIDs(nodes []*Node) storj.NodeIDList {
// CopyNode returns a deep copy of a node // CopyNode returns a deep copy of a node
// It would be better to use `proto.Clone` but it is curently incompatible // It would be better to use `proto.Clone` but it is curently incompatible
// with gogo's customtype extension. // with gogo's customtype extension.
// (see https://github.com/gogo/protobuf/issues/147) // (see https://github.com/gogo/protobuf/issues/147).
func CopyNode(src *Node) (dst *Node) { func CopyNode(src *Node) (dst *Node) {
node := Node{Id: storj.NodeID{}} node := Node{Id: storj.NodeID{}}
copy(node.Id[:], src.Id[:]) copy(node.Id[:], src.Id[:])
@ -62,7 +62,7 @@ func CopyNode(src *Node) (dst *Node) {
return &node return &node
} }
// AddressEqual compares two node addresses // AddressEqual compares two node addresses.
func AddressEqual(a1, a2 *NodeAddress) bool { func AddressEqual(a1, a2 *NodeAddress) bool {
if a1 == nil && a2 == nil { if a1 == nil && a2 == nil {
return true return true
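Equal compares two messages by serializing both and comparing the bytes, while AddressEqual special-cases nil before comparing the address fields. A small sketch, assuming pb.NodeAddress exposes an Address field as it does in this version of the package:

package main

import (
    "fmt"

    "storj.io/common/pb"
)

func main() {
    a := &pb.NodeAddress{Address: "node.example.test:7777"}
    b := &pb.NodeAddress{Address: "node.example.test:7777"}

    // AddressEqual treats two nils as equal and a nil/non-nil pair as unequal.
    fmt.Println(pb.AddressEqual(a, b))   // true
    fmt.Println(pb.AddressEqual(a, nil)) // false

    // Equal falls back to comparing the serialized form of the two messages.
    fmt.Println(pb.Equal(a, b)) // true
}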

View file

@ -54,7 +54,7 @@ func (VoucherResponse_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3659b9a115b8060d, []int{2, 0} return fileDescriptor_3659b9a115b8060d, []int{2, 0}
} }
// Voucher is a signed message verifying that a node has been vetted by a particular satellite // Voucher is a signed message verifying that a node has been vetted by a particular satellite.
type Voucher struct { type Voucher struct {
SatelliteId NodeID `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` SatelliteId NodeID `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"`
StorageNodeId NodeID `protobuf:"bytes,2,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"` StorageNodeId NodeID `protobuf:"bytes,2,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"`

View file

@ -9,7 +9,7 @@ package vouchers;
import "gogo.proto"; import "gogo.proto";
import "google/protobuf/timestamp.proto"; import "google/protobuf/timestamp.proto";
// Voucher is a signed message verifying that a node has been vetted by a particular satellite // Voucher is a signed message verifying that a node has been vetted by a particular satellite.
message Voucher { message Voucher {
bytes satellite_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes satellite_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
bytes storage_node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; bytes storage_node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];

View file

@ -64,7 +64,7 @@ var (
type ExtensionID = asn1.ObjectIdentifier type ExtensionID = asn1.ObjectIdentifier
// Config is used to bind cli flags for determining which extensions will // Config is used to bind cli flags for determining which extensions will
// be used by the server // be used by the server.
type Config struct { type Config struct {
Revocation bool `default:"true" help:"if true, client leaves may contain the most recent certificate revocation for the current certificate"` Revocation bool `default:"true" help:"if true, client leaves may contain the most recent certificate revocation for the current certificate"`
WhitelistSignedLeaf bool `default:"false" help:"if true, client leaves must contain a valid \"signed certificate extension\" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)"` WhitelistSignedLeaf bool `default:"false" help:"if true, client leaves must contain a valid \"signed certificate extension\" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)"`

View file

@ -27,16 +27,16 @@ var (
RevocationUpdateHandler = NewHandlerFactory(&RevocationExtID, revocationUpdater) RevocationUpdateHandler = NewHandlerFactory(&RevocationExtID, revocationUpdater)
) )
// ErrRevocation is used when an error occurs involving a certificate revocation // ErrRevocation is used when an error occurs involving a certificate revocation.
var ErrRevocation = errs.Class("revocation processing error") var ErrRevocation = errs.Class("revocation processing error")
// ErrRevocationDB is used when an error occurs involving the revocations database // ErrRevocationDB is used when an error occurs involving the revocations database.
var ErrRevocationDB = errs.Class("revocation database error") var ErrRevocationDB = errs.Class("revocation database error")
// ErrRevokedCert is used when a certificate in the chain is revoked and not expected to be // ErrRevokedCert is used when a certificate in the chain is revoked and not expected to be.
var ErrRevokedCert = ErrRevocation.New("a certificate in the chain is revoked") var ErrRevokedCert = ErrRevocation.New("a certificate in the chain is revoked")
// ErrRevocationTimestamp is used when a revocation's timestamp is older than the last recorded revocation // ErrRevocationTimestamp is used when a revocation's timestamp is older than the last recorded revocation.
var ErrRevocationTimestamp = Error.New("revocation timestamp is older than last known revocation") var ErrRevocationTimestamp = Error.New("revocation timestamp is older than last known revocation")
// Revocation represents a certificate revocation for storage in the revocation // Revocation represents a certificate revocation for storage in the revocation
@ -161,12 +161,12 @@ func (r *Revocation) Sign(key crypto.PrivateKey) error {
return nil return nil
} }
// Marshal serializes a revocation to bytes // Marshal serializes a revocation to bytes.
func (r Revocation) Marshal() ([]byte, error) { func (r Revocation) Marshal() ([]byte, error) {
return (&revocationEncoder{}).encode(r) return (&revocationEncoder{}).encode(r)
} }
// Unmarshal deserializes a revocation from bytes // Unmarshal deserializes a revocation from bytes.
func (r *Revocation) Unmarshal(data []byte) error { func (r *Revocation) Unmarshal(data []byte) error {
revocation, err := (&revocationDecoder{}).decode(data) revocation, err := (&revocationDecoder{}).decode(data)
if err != nil { if err != nil {

View file

@ -8,7 +8,7 @@ import (
"crypto/x509/pkix" "crypto/x509/pkix"
) )
// CATemplate returns x509.Certificate template for certificate authority // CATemplate returns x509.Certificate template for certificate authority.
func CATemplate() (*x509.Certificate, error) { func CATemplate() (*x509.Certificate, error) {
serialNumber, err := newSerialNumber() serialNumber, err := newSerialNumber()
if err != nil { if err != nil {
@ -26,7 +26,7 @@ func CATemplate() (*x509.Certificate, error) {
return template, nil return template, nil
} }
// LeafTemplate returns x509.Certificate template for signing and encrypting // LeafTemplate returns x509.Certificate template for signing and encrypting.
func LeafTemplate() (*x509.Certificate, error) { func LeafTemplate() (*x509.Certificate, error) {
serialNumber, err := newSerialNumber() serialNumber, err := newSerialNumber()
if err != nil { if err != nil {

View file

@ -7,7 +7,7 @@ import (
"storj.io/common/peertls/extensions" "storj.io/common/peertls/extensions"
) )
// Config holds tls configuration parameters // Config holds tls configuration parameters.
type Config struct { type Config struct {
RevocationDBURL string `default:"bolt://$CONFDIR/revocations.db" help:"url for revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)"` RevocationDBURL string `default:"bolt://$CONFDIR/revocations.db" help:"url for revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)"`
PeerCAWhitelistPath string `help:"path to the CA cert whitelist (peer identities must be signed by one these to be verified). this will override the default peer whitelist"` PeerCAWhitelistPath string `help:"path to the CA cert whitelist (peer identities must be signed by one these to be verified). this will override the default peer whitelist"`

View file

@ -24,7 +24,7 @@ func (opts *Options) ClientTLSConfig(id storj.NodeID) *tls.Config {
} }
// ClientTLSConfigPrefix returns a TSLConfig for use as a client in handshaking with a peer. // ClientTLSConfigPrefix returns a TSLConfig for use as a client in handshaking with a peer.
// The peer node id is validated to match the given prefix // The peer node id is validated to match the given prefix.
func (opts *Options) ClientTLSConfigPrefix(idPrefix string) *tls.Config { func (opts *Options) ClientTLSConfigPrefix(idPrefix string) *tls.Config {
return opts.tlsConfig(false, verifyIdentityPrefix(idPrefix)) return opts.tlsConfig(false, verifyIdentityPrefix(idPrefix))
} }

View file

@ -49,12 +49,12 @@ func DoubleSHA256PublicKey(k crypto.PublicKey) ([sha256.Size]byte, error) {
return end, nil return end, nil
} }
// Temporary returns false to indicate that is is a non-temporary error // Temporary returns false to indicate that is is a non-temporary error.
func (nte NonTemporaryError) Temporary() bool { func (nte NonTemporaryError) Temporary() bool {
return false return false
} }
// Err returns the underlying error // Err returns the underlying error.
func (nte NonTemporaryError) Err() error { func (nte NonTemporaryError) Err() error {
return nte.error return nte.error
} }

View file

@ -14,7 +14,7 @@ func NewHash() hash.Hash {
return sha256.New() return sha256.New()
} }
// SHA256Hash calculates the SHA256 hash of the input data // SHA256Hash calculates the SHA256 hash of the input data.
func SHA256Hash(data []byte) []byte { func SHA256Hash(data []byte) []byte {
sum := sha256.Sum256(data) sum := sha256.Sum256(data)
return sum[:] return sum[:]

View file

@ -33,18 +33,18 @@ var (
} }
) )
// GeneratePrivateKey returns a new PrivateKey for signing messages // GeneratePrivateKey returns a new PrivateKey for signing messages.
func GeneratePrivateKey() (crypto.PrivateKey, error) { func GeneratePrivateKey() (crypto.PrivateKey, error) {
return GeneratePrivateECDSAKey(authECCurve) return GeneratePrivateECDSAKey(authECCurve)
// return GeneratePrivateRSAKey(StorjRSAKeyBits) // return GeneratePrivateRSAKey(StorjRSAKeyBits)
} }
// GeneratePrivateECDSAKey returns a new private ECDSA key for signing messages // GeneratePrivateECDSAKey returns a new private ECDSA key for signing messages.
func GeneratePrivateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { func GeneratePrivateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(curve, rand.Reader) return ecdsa.GenerateKey(curve, rand.Reader)
} }
// GeneratePrivateRSAKey returns a new private RSA key for signing messages // GeneratePrivateRSAKey returns a new private RSA key for signing messages.
func GeneratePrivateRSAKey(bits int) (*rsa.PrivateKey, error) { func GeneratePrivateRSAKey(bits int) (*rsa.PrivateKey, error) {
return rsa.GenerateKey(rand.Reader, bits) return rsa.GenerateKey(rand.Reader, bits)
} }
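GeneratePrivateKey defaults to ECDSA on the package's curve, with explicit ECDSA and RSA variants alongside it; SHA256Hash from the hunk further up is the package's digest helper. A small sketch:

package main

import (
    "crypto/elliptic"
    "fmt"

    "storj.io/common/pkcrypto"
)

func main() {
    // Default key generation (ECDSA in this version of the package).
    key, err := pkcrypto.GeneratePrivateKey()
    if err != nil {
        panic(err)
    }
    fmt.Printf("generated key of type %T\n", key)

    // Explicit curve selection via the ECDSA variant.
    ecKey, err := pkcrypto.GeneratePrivateECDSAKey(elliptic.P256())
    if err != nil {
        panic(err)
    }
    fmt.Println("curve:", ecKey.Curve.Params().Name)

    // SHA256Hash returns the raw 32-byte digest of its input.
    fmt.Printf("digest: %x\n", pkcrypto.SHA256Hash([]byte("hello")))
}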

View file

@ -8,7 +8,7 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// Error is the errs class of standard Ranger errors // Error is the errs class of standard Ranger errors.
var Error = errs.Class("ranger error") var Error = errs.Class("ranger error")
var mon = monkit.Package() var mon = monkit.Package()

View file

@ -20,13 +20,13 @@ type Ranger interface {
Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error)
} }
// ByteRanger turns a byte slice into a Ranger // ByteRanger turns a byte slice into a Ranger.
type ByteRanger []byte type ByteRanger []byte
// Size implements Ranger.Size // Size implements Ranger.Size.
func (b ByteRanger) Size() int64 { return int64(len(b)) } func (b ByteRanger) Size() int64 { return int64(len(b)) }
// Range implements Ranger.Range // Range implements Ranger.Range.
func (b ByteRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { func (b ByteRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if offset < 0 { if offset < 0 {
@ -75,7 +75,7 @@ func concat2(r1, r2 Ranger) Ranger {
return &concatReader{r1: r1, r2: r2} return &concatReader{r1: r1, r2: r2}
} }
// Concat concatenates Rangers // Concat concatenates Rangers.
func Concat(r ...Ranger) Ranger { func Concat(r ...Ranger) Ranger {
switch len(r) { switch len(r) {
case 0: case 0:
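A Ranger exposes Size plus Range(offset, length) as a stream; ByteRanger adapts an in-memory slice and Concat stitches several Rangers into one logical range. A short sketch:

package main

import (
    "context"
    "fmt"
    "io/ioutil"

    "storj.io/common/ranger"
)

func main() {
    // Two in-memory rangers concatenated into one 12-byte logical range.
    combined := ranger.Concat(
        ranger.ByteRanger([]byte("hello, ")),
        ranger.ByteRanger([]byte("world")),
    )
    fmt.Println("total size:", combined.Size())

    // Read 5 bytes starting at offset 7, which lands in the second ranger.
    rc, err := combined.Range(context.Background(), 7, 5)
    if err != nil {
        panic(err)
    }
    defer rc.Close()

    data, err := ioutil.ReadAll(rc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", data) // world
}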

View file

@ -13,7 +13,7 @@ type readerAtRanger struct {
size int64 size int64
} }
// ReaderAtRanger converts a ReaderAt with a given size to a Ranger // ReaderAtRanger converts a ReaderAt with a given size to a Ranger.
func ReaderAtRanger(r io.ReaderAt, size int64) Ranger { func ReaderAtRanger(r io.ReaderAt, size int64) Ranger {
return &readerAtRanger{ return &readerAtRanger{
r: r, r: r,

View file

@ -18,9 +18,6 @@ import (
const ( const (
// IsDRPC is true if drpc is being used. // IsDRPC is true if drpc is being used.
IsDRPC = true IsDRPC = true
// IsGRPC is true if grpc is being used.
IsGRPC = false
) )
var mon = monkit.Package() var mon = monkit.Package()

34
vendor/storj.io/common/rpc/dial.go generated vendored
View file

@ -37,6 +37,13 @@ func NewDefaultManagerOptions() drpcmanager.Options {
} }
} }
// Transport is a type that creates net.Conns, given an address.
// net.Dialer implements this interface and is used by default.
type Transport interface {
// DialContext is called to establish a connection.
DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
// Dialer holds configuration for dialing. // Dialer holds configuration for dialing.
type Dialer struct { type Dialer struct {
// TLSOptions controls the tls options for dialing. If it is nil, only // TLSOptions controls the tls options for dialing. If it is nil, only
@ -65,6 +72,9 @@ type Dialer struct {
// socket option on dialed connections. Only valid on linux. Only set // socket option on dialed connections. Only valid on linux. Only set
// if positive. // if positive.
TCPUserTimeout time.Duration TCPUserTimeout time.Duration
// Transport is how sockets are opened. If nil, net.Dialer is used.
Transport Transport
} }
// NewDefaultDialer returns a Dialer with default timeouts set. // NewDefaultDialer returns a Dialer with default timeouts set.
@ -96,7 +106,12 @@ func (d Dialer) dialContext(ctx context.Context, address string) (net.Conn, erro
} }
} }
conn, err := new(net.Dialer).DialContext(ctx, "tcp", address) dialer := d.Transport
if dialer == nil {
dialer = new(net.Dialer)
}
conn, err := dialer.DialContext(ctx, "tcp", address)
if err != nil { if err != nil {
// N.B. this error is not wrapped on purpose! grpc code cares about inspecting // N.B. this error is not wrapped on purpose! grpc code cares about inspecting
// it and it's not smart enough to attempt to do any unwrapping. :( Additionally // it and it's not smart enough to attempt to do any unwrapping. :( Additionally
@ -142,7 +157,7 @@ func (d Dialer) DialNode(ctx context.Context, node *pb.Node) (_ *Conn, err error
// DialAddressID dials to the specified address and asserts it has the given node id. // DialAddressID dials to the specified address and asserts it has the given node id.
func (d Dialer) DialAddressID(ctx context.Context, address string, id storj.NodeID) (_ *Conn, err error) { func (d Dialer) DialAddressID(ctx context.Context, address string, id storj.NodeID) (_ *Conn, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx, "node: "+id.String()[0:8])(&err)
if d.TLSOptions == nil { if d.TLSOptions == nil {
return nil, Error.New("tls options not set when required for this dial") return nil, Error.New("tls options not set when required for this dial")
@ -151,6 +166,17 @@ func (d Dialer) DialAddressID(ctx context.Context, address string, id storj.Node
return d.dial(ctx, address, d.TLSOptions.ClientTLSConfig(id)) return d.dial(ctx, address, d.TLSOptions.ClientTLSConfig(id))
} }
// DialNodeURL dials to the specified node url and asserts it has the given node id.
func (d Dialer) DialNodeURL(ctx context.Context, nodeURL storj.NodeURL) (_ *Conn, err error) {
defer mon.Task()(&ctx, "node: "+nodeURL.ID.String()[0:8])(&err)
if d.TLSOptions == nil {
return nil, Error.New("tls options not set when required for this dial")
}
return d.dial(ctx, nodeURL.Address, d.TLSOptions.ClientTLSConfig(nodeURL.ID))
}
// DialAddressInsecureBestEffort is like DialAddressInsecure but tries to dial a node securely if // DialAddressInsecureBestEffort is like DialAddressInsecure but tries to dial a node securely if
// it can. // it can.
// //
@ -330,7 +356,7 @@ type tlsConnWrapper struct {
underlying net.Conn underlying net.Conn
} }
// Close closes the underlying connection // Close closes the underlying connection.
func (t *tlsConnWrapper) Close() error { return t.underlying.Close() } func (t *tlsConnWrapper) Close() error { return t.underlying.Close() }
// drpcHeaderConn fulfills the net.Conn interface. On the first call to Write // drpcHeaderConn fulfills the net.Conn interface. On the first call to Write
@ -340,7 +366,7 @@ type drpcHeaderConn struct {
once sync.Once once sync.Once
} }
// newDrpcHeaderConn returns a new *drpcHeaderConn // newDrpcHeaderConn returns a new *drpcHeaderConn.
func newDrpcHeaderConn(conn net.Conn) *drpcHeaderConn { func newDrpcHeaderConn(conn net.Conn) *drpcHeaderConn {
return &drpcHeaderConn{ return &drpcHeaderConn{
Conn: conn, Conn: conn,
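The rpc.Dialer hunks above add two things: a pluggable Transport (any type with net.Dialer's DialContext signature, with net.Dialer used when it is nil) and a DialNodeURL method that dials a storj.NodeURL and asserts the peer's node id. The sketch below wires a logging Transport into a Dialer and calls DialNodeURL; the node URL is a placeholder and the Dialer carries no TLSOptions, so the dial is rejected up front, and the point is only how the pieces fit together:

package main

import (
    "context"
    "log"
    "net"

    "storj.io/common/rpc"
    "storj.io/common/storj"
)

// loggingTransport satisfies the new rpc.Transport interface by logging each
// connection attempt and then delegating to a plain net.Dialer.
type loggingTransport struct {
    dialer net.Dialer
}

func (t loggingTransport) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    log.Printf("dialing %s %s", network, address)
    return t.dialer.DialContext(ctx, network, address)
}

func main() {
    dialer := rpc.Dialer{Transport: loggingTransport{}}

    // Placeholder URL in the id-less form; a real call would use
    // "<node id>@host:port" and a dialer configured with TLSOptions.
    nodeURL, err := storj.ParseNodeURL("satellite.example.test:7777")
    if err != nil {
        log.Fatal(err)
    }

    conn, err := dialer.DialNodeURL(context.Background(), nodeURL)
    if err != nil {
        // Expected here: without TLSOptions the dial is refused immediately.
        log.Println("dial failed:", err)
        return
    }
    defer func() { _ = conn.Close() }()
}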

View file

@ -43,7 +43,7 @@ func init() {
} }
} }
// KnownNodeID looks for a well-known node id for a given address // KnownNodeID looks for a well-known node id for a given address.
func KnownNodeID(address string) (id storj.NodeID, known bool) { func KnownNodeID(address string) (id storj.NodeID, known bool) {
id, known = knownNodeIDs[address] id, known = knownNodeIDs[address]
if !known { if !known {

View file

@ -11,7 +11,6 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
"storj.io/common/internal/grpchook"
"storj.io/drpc/drpcctx" "storj.io/drpc/drpcctx"
) )
@ -38,13 +37,8 @@ func FromContext(ctx context.Context) (*Peer, error) {
return peer, nil return peer, nil
} else if peer, drpcErr := drpcInternalFromContext(ctx); drpcErr == nil { } else if peer, drpcErr := drpcInternalFromContext(ctx); drpcErr == nil {
return peer, nil return peer, nil
} else if addr, state, grpcErr := grpchook.InternalFromContext(ctx); grpcErr == nil {
return &Peer{Addr: addr, State: state}, nil
} else { } else {
if grpcErr == grpchook.ErrNotHooked { return nil, drpcErr
grpcErr = nil
}
return nil, errs.Combine(drpcErr, grpcErr)
} }
} }
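After this change FromContext only consults the drpc context, since the grpc hook is gone. A small sketch of reading the caller's address inside a handler, assuming the vendored package path is storj.io/common/rpc/rpcpeer and that Peer keeps the Addr field used above:

package main

import (
    "context"
    "fmt"

    "storj.io/common/rpc/rpcpeer"
)

// logCaller pulls the remote peer out of a request context. Outside a
// drpc-served request no peer is attached, so FromContext returns an error.
func logCaller(ctx context.Context) {
    peer, err := rpcpeer.FromContext(ctx)
    if err != nil {
        fmt.Println("no rpc peer in context:", err)
        return
    }
    fmt.Println("request from", peer.Addr)
}

func main() {
    // A bare context has no peer; in a real drpc handler the library attaches
    // one before the endpoint runs.
    logCaller(context.Background())
}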

View file

@ -23,7 +23,7 @@ var mon = monkit.Package()
// we may want to keep. that adds quite a bit of complexity because channels // we may want to keep. that adds quite a bit of complexity because channels
// do not support removing buffered elements, so it didn't seem worth it. // do not support removing buffered elements, so it didn't seem worth it.
// expiringConn wraps a connection // expiringConn wraps a connection.
type expiringConn struct { type expiringConn struct {
conn *drpcconn.Conn conn *drpcconn.Conn
timer *time.Timer timer *time.Timer
@ -39,6 +39,11 @@ func newExpiringConn(conn *drpcconn.Conn, dur time.Duration) *expiringConn {
return ex return ex
} }
// Closed returns true if the connection is already closed.
func (ex *expiringConn) Closed() bool {
return ex.conn.Closed()
}
// Cancel attempts to cancel the expiration timer and returns true if the // Cancel attempts to cancel the expiration timer and returns true if the
// timer will not close the connection. // timer will not close the connection.
func (ex *expiringConn) Cancel() bool { func (ex *expiringConn) Cancel() bool {
@ -107,6 +112,16 @@ func (c *Conn) Close() (err error) {
return err return err
} }
// Closed returns true if the connection is already closed.
func (c *Conn) Closed() bool {
select {
case <-c.done:
return true
default:
return false
}
}
// newConn creates a new connection using the dialer. // newConn creates a new connection using the dialer.
func (c *Conn) newConn(ctx context.Context) (_ *drpcconn.Conn, err error) { func (c *Conn) newConn(ctx context.Context) (_ *drpcconn.Conn, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
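Both the pooled expiringConn and the exported Conn gain a Closed accessor; on Conn it is a non-blocking receive on the done channel. The standalone sketch below reproduces that idiom so it runs without the pool itself:

package main

import "fmt"

// closedChecker mirrors the Closed method added above: closing the done
// channel marks the value closed, and a non-blocking receive reports it.
type closedChecker struct {
    done chan struct{}
}

func (c *closedChecker) Close() { close(c.done) }

func (c *closedChecker) Closed() bool {
    select {
    case <-c.done:
        return true
    default:
        return false
    }
}

func main() {
    c := &closedChecker{done: make(chan struct{})}
    fmt.Println(c.Closed()) // false
    c.Close()
    fmt.Println(c.Closed()) // true
}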

View file

@ -10,7 +10,6 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
"storj.io/common/internal/grpchook"
"storj.io/drpc/drpcerr" "storj.io/drpc/drpcerr"
) )
@ -55,13 +54,6 @@ func Code(err error) StatusCode {
return code return code
} }
// If we have grpc attached try to get grpc code if possible.
if grpchook.HookedConvertToStatusCode != nil {
if code, ok := grpchook.HookedConvertToStatusCode(err); ok {
return StatusCode(code)
}
}
return Unknown return Unknown
} }
} }
@ -72,11 +64,6 @@ func Wrap(code StatusCode, err error) error {
return nil return nil
} }
// Should we also handle grpc error status codes.
if grpchook.HookedErrorWrap != nil {
return grpchook.HookedErrorWrap(grpchook.StatusCode(code), err)
}
ce := &codeErr{ ce := &codeErr{
code: code, code: code,
} }
@ -95,7 +82,7 @@ func Error(code StatusCode, msg string) error {
return Wrap(code, errs.New("%s", msg)) return Wrap(code, errs.New("%s", msg))
} }
// Errorf : Error :: fmt.Sprintf : fmt.Sprint // Errorf : Error :: fmt.Sprintf : fmt.Sprint.
func Errorf(code StatusCode, format string, a ...interface{}) error { func Errorf(code StatusCode, format string, a ...interface{}) error {
return Wrap(code, errs.New(format, a...)) return Wrap(code, errs.New(format, a...))
} }
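With the grpc hook removed, Wrap simply attaches a StatusCode to an error and Code recovers it, falling back to Unknown for errors that never went through this package; Error and Errorf are conveniences over Wrap. A short sketch; NotFound is assumed to be one of the package's status codes alongside the Unknown shown in this hunk:

package main

import (
    "errors"
    "fmt"

    "storj.io/common/rpc/rpcstatus"
)

func main() {
    // Attach a status code to an ordinary error.
    err := rpcstatus.Wrap(rpcstatus.NotFound, errors.New("object missing"))
    fmt.Println(rpcstatus.Code(err) == rpcstatus.NotFound) // true

    // Errors that never passed through this package report Unknown.
    fmt.Println(rpcstatus.Code(errors.New("plain error")) == rpcstatus.Unknown) // true

    // Errorf is the printf-style convenience over Wrap.
    fmt.Println(rpcstatus.Errorf(rpcstatus.NotFound, "bucket %q not found", "photos"))
}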

View file

@ -92,8 +92,8 @@ func SignUplinkPieceHash(ctx context.Context, privateKey storj.PiecePrivateKey,
return &signed, nil return &signed, nil
} }
// SignStreamID signs the stream ID using the specified signer // SignStreamID signs the stream ID using the specified signer.
// Signer is a satellite // Signer is a satellite.
func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID) (_ *pb.SatStreamID, err error) { func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID) (_ *pb.SatStreamID, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeStreamID(ctx, unsigned) bytes, err := EncodeStreamID(ctx, unsigned)
@ -110,8 +110,8 @@ func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID)
return &signed, nil return &signed, nil
} }
// SignSegmentID signs the segment ID using the specified signer // SignSegmentID signs the segment ID using the specified signer.
// Signer is a satellite // Signer is a satellite.
func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID) (_ *pb.SatSegmentID, err error) { func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID) (_ *pb.SatSegmentID, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeSegmentID(ctx, unsigned) bytes, err := EncodeSegmentID(ctx, unsigned)
@ -128,8 +128,8 @@ func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID
return &signed, nil return &signed, nil
} }
// SignExitCompleted signs the ExitCompleted using the specified signer // SignExitCompleted signs the ExitCompleted using the specified signer.
// Signer is a satellite // Signer is a satellite.
func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitCompleted) (_ *pb.ExitCompleted, err error) { func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitCompleted) (_ *pb.ExitCompleted, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeExitCompleted(ctx, unsigned) bytes, err := EncodeExitCompleted(ctx, unsigned)
@ -146,8 +146,8 @@ func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitComp
return &signed, nil return &signed, nil
} }
// SignExitFailed signs the ExitFailed using the specified signer // SignExitFailed signs the ExitFailed using the specified signer.
// Signer is a satellite // Signer is a satellite.
func SignExitFailed(ctx context.Context, signer Signer, unsigned *pb.ExitFailed) (_ *pb.ExitFailed, err error) { func SignExitFailed(ctx context.Context, signer Signer, unsigned *pb.ExitFailed) (_ *pb.ExitFailed, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeExitFailed(ctx, unsigned) bytes, err := EncodeExitFailed(ctx, unsigned)

View file

@ -72,7 +72,7 @@ func VerifyUplinkPieceHashSignature(ctx context.Context, publicKey storj.PiecePu
return Error.Wrap(publicKey.Verify(bytes, signed.Signature)) return Error.Wrap(publicKey.Verify(bytes, signed.Signature))
} }
// VerifyStreamID verifies that the signature inside stream ID belongs to the satellite // VerifyStreamID verifies that the signature inside stream ID belongs to the satellite.
func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamID) (err error) { func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamID) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeStreamID(ctx, signed) bytes, err := EncodeStreamID(ctx, signed)
@ -83,7 +83,7 @@ func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamI
return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature)
} }
// VerifySegmentID verifies that the signature inside segment ID belongs to the satellite // VerifySegmentID verifies that the signature inside segment ID belongs to the satellite.
func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmentID) (err error) { func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmentID) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeSegmentID(ctx, signed) bytes, err := EncodeSegmentID(ctx, signed)
@ -94,7 +94,7 @@ func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmen
return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature)
} }
// VerifyExitCompleted verifies that the signature inside ExitCompleted belongs to the satellite // VerifyExitCompleted verifies that the signature inside ExitCompleted belongs to the satellite.
func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitCompleted) (err error) { func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitCompleted) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeExitCompleted(ctx, signed) bytes, err := EncodeExitCompleted(ctx, signed)
@ -105,7 +105,7 @@ func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitC
return Error.Wrap(satellite.HashAndVerifySignature(ctx, bytes, signed.ExitCompleteSignature)) return Error.Wrap(satellite.HashAndVerifySignature(ctx, bytes, signed.ExitCompleteSignature))
} }
// VerifyExitFailed verifies that the signature inside ExitFailed belongs to the satellite // VerifyExitFailed verifies that the signature inside ExitFailed belongs to the satellite.
func VerifyExitFailed(ctx context.Context, satellite Signee, signed *pb.ExitFailed) (err error) { func VerifyExitFailed(ctx context.Context, satellite Signee, signed *pb.ExitFailed) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
bytes, err := EncodeExitFailed(ctx, signed) bytes, err := EncodeExitFailed(ctx, signed)

View file

@ -12,17 +12,17 @@ import (
) )
var ( var (
// ErrBucket is an error class for general bucket errors // ErrBucket is an error class for general bucket errors.
ErrBucket = errs.Class("bucket") ErrBucket = errs.Class("bucket")
// ErrNoBucket is an error class for using empty bucket name // ErrNoBucket is an error class for using empty bucket name.
ErrNoBucket = errs.Class("no bucket specified") ErrNoBucket = errs.Class("no bucket specified")
// ErrBucketNotFound is an error class for non-existing bucket // ErrBucketNotFound is an error class for non-existing bucket.
ErrBucketNotFound = errs.Class("bucket not found") ErrBucketNotFound = errs.Class("bucket not found")
) )
// Bucket contains information about a specific bucket // Bucket contains information about a specific bucket.
type Bucket struct { type Bucket struct {
ID uuid.UUID ID uuid.UUID
Name string Name string

View file

@ -1,7 +1,5 @@
// Copyright (C) 2019 Storj Labs, Inc. // Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information. // See LICENSE for copying information.
/*Package storj contains the types which represent the main entities of the // Package storj contains the types which represent the main entities of the Storj domain.
Storj domain.
*/
package storj package storj

View file

@ -9,7 +9,7 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// EncryptionParameters is the cipher suite and parameters used for encryption // EncryptionParameters is the cipher suite and parameters used for encryption.
type EncryptionParameters struct { type EncryptionParameters struct {
// CipherSuite specifies the cipher suite to be used for encryption. // CipherSuite specifies the cipher suite to be used for encryption.
CipherSuite CipherSuite CipherSuite CipherSuite
@ -24,7 +24,7 @@ type EncryptionParameters struct {
BlockSize int32 BlockSize int32
} }
// IsZero returns true if no field in the struct is set to non-zero value // IsZero returns true if no field in the struct is set to non-zero value.
func (params EncryptionParameters) IsZero() bool { func (params EncryptionParameters) IsZero() bool {
return params == (EncryptionParameters{}) return params == (EncryptionParameters{})
} }
@ -45,11 +45,11 @@ const (
// by the NaCl cryptography library under the name "Secretbox". // by the NaCl cryptography library under the name "Secretbox".
EncSecretBox EncSecretBox
// EncNullBase64URL is like EncNull but Base64 encodes/decodes the // EncNullBase64URL is like EncNull but Base64 encodes/decodes the
// binary path data (URL-safe) // binary path data (URL-safe).
EncNullBase64URL EncNullBase64URL
) )
// Constant definitions for key and nonce sizes // Constant definitions for key and nonce sizes.
const ( const (
KeySize = 32 KeySize = 32
NonceSize = 24 NonceSize = 24
@ -66,29 +66,29 @@ func NewKey(humanReadableKey []byte) (*Key, error) {
return &key, nil return &key, nil
} }
// Key represents the largest key used by any encryption protocol // Key represents the largest key used by any encryption protocol.
type Key [KeySize]byte type Key [KeySize]byte
// Raw returns the key as a raw byte array pointer // Raw returns the key as a raw byte array pointer.
func (key *Key) Raw() *[KeySize]byte { func (key *Key) Raw() *[KeySize]byte {
return (*[KeySize]byte)(key) return (*[KeySize]byte)(key)
} }
// IsZero returns true if key is nil or it points to its zero value // IsZero returns true if key is nil or it points to its zero value.
func (key *Key) IsZero() bool { func (key *Key) IsZero() bool {
return key == nil || *key == (Key{}) return key == nil || *key == (Key{})
} }
// ErrNonce is used when something goes wrong with a stream ID // ErrNonce is used when something goes wrong with a stream ID.
var ErrNonce = errs.Class("nonce error") var ErrNonce = errs.Class("nonce error")
// nonceEncoding is base32 without padding // nonceEncoding is base32 without padding.
var nonceEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) var nonceEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// Nonce represents the largest nonce used by any encryption protocol // Nonce represents the largest nonce used by any encryption protocol.
type Nonce [NonceSize]byte type Nonce [NonceSize]byte
// NonceFromString decodes an base32 encoded // NonceFromString decodes an base32 encoded.
func NonceFromString(s string) (Nonce, error) { func NonceFromString(s string) (Nonce, error) {
nonceBytes, err := nonceEncoding.DecodeString(s) nonceBytes, err := nonceEncoding.DecodeString(s)
if err != nil { if err != nil {
@ -97,7 +97,7 @@ func NonceFromString(s string) (Nonce, error) {
return NonceFromBytes(nonceBytes) return NonceFromBytes(nonceBytes)
} }
// NonceFromBytes converts a byte slice into a nonce // NonceFromBytes converts a byte slice into a nonce.
func NonceFromBytes(b []byte) (Nonce, error) { func NonceFromBytes(b []byte) (Nonce, error) {
if len(b) != len(Nonce{}) { if len(b) != len(Nonce{}) {
return Nonce{}, ErrNonce.New("not enough bytes to make a nonce; have %d, need %d", len(b), len(NodeID{})) return Nonce{}, ErrNonce.New("not enough bytes to make a nonce; have %d, need %d", len(b), len(NodeID{}))
@ -108,56 +108,56 @@ func NonceFromBytes(b []byte) (Nonce, error) {
return nonce, nil return nonce, nil
} }
// IsZero returns whether nonce is unassigned // IsZero returns whether nonce is unassigned.
func (nonce Nonce) IsZero() bool { func (nonce Nonce) IsZero() bool {
return nonce == Nonce{} return nonce == Nonce{}
} }
// String representation of the nonce // String representation of the nonce.
func (nonce Nonce) String() string { return nonceEncoding.EncodeToString(nonce.Bytes()) } func (nonce Nonce) String() string { return nonceEncoding.EncodeToString(nonce.Bytes()) }
// Bytes returns bytes of the nonce // Bytes returns bytes of the nonce.
func (nonce Nonce) Bytes() []byte { return nonce[:] } func (nonce Nonce) Bytes() []byte { return nonce[:] }
// Raw returns the nonce as a raw byte array pointer // Raw returns the nonce as a raw byte array pointer.
func (nonce *Nonce) Raw() *[NonceSize]byte { func (nonce *Nonce) Raw() *[NonceSize]byte {
return (*[NonceSize]byte)(nonce) return (*[NonceSize]byte)(nonce)
} }
// Marshal serializes a nonce // Marshal serializes a nonce.
func (nonce Nonce) Marshal() ([]byte, error) { func (nonce Nonce) Marshal() ([]byte, error) {
return nonce.Bytes(), nil return nonce.Bytes(), nil
} }
// MarshalTo serializes a nonce into the passed byte slice // MarshalTo serializes a nonce into the passed byte slice.
func (nonce *Nonce) MarshalTo(data []byte) (n int, err error) { func (nonce *Nonce) MarshalTo(data []byte) (n int, err error) {
n = copy(data, nonce.Bytes()) n = copy(data, nonce.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a nonce // Unmarshal deserializes a nonce.
func (nonce *Nonce) Unmarshal(data []byte) error { func (nonce *Nonce) Unmarshal(data []byte) error {
var err error var err error
*nonce, err = NonceFromBytes(data) *nonce, err = NonceFromBytes(data)
return err return err
} }
// Size returns the length of a nonce (implements gogo's custom type interface) // Size returns the length of a nonce (implements gogo's custom type interface).
func (nonce Nonce) Size() int { func (nonce Nonce) Size() int {
return len(nonce) return len(nonce)
} }
// MarshalJSON serializes a nonce to a json string as bytes // MarshalJSON serializes a nonce to a json string as bytes.
func (nonce Nonce) MarshalJSON() ([]byte, error) { func (nonce Nonce) MarshalJSON() ([]byte, error) {
return []byte(`"` + nonce.String() + `"`), nil return []byte(`"` + nonce.String() + `"`), nil
} }
// UnmarshalJSON deserializes a json string (as bytes) to a nonce // UnmarshalJSON deserializes a json string (as bytes) to a nonce.
func (nonce *Nonce) UnmarshalJSON(data []byte) error { func (nonce *Nonce) UnmarshalJSON(data []byte) error {
var err error var err error
*nonce, err = NonceFromString(string(data)) *nonce, err = NonceFromString(string(data))
return err return err
} }
// EncryptedPrivateKey is a private key that has been encrypted // EncryptedPrivateKey is a private key that has been encrypted.
type EncryptedPrivateKey []byte type EncryptedPrivateKey []byte
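Keys and nonces are fixed-size arrays with small constructor and codec helpers; nonce strings use unpadded base32. A brief round-trip sketch using the constructors shown above:

package main

import (
    "fmt"

    "storj.io/common/storj"
)

func main() {
    // NewKey builds a fixed 32-byte Key from human-readable input.
    key, err := storj.NewKey([]byte("correct horse battery staple"))
    if err != nil {
        panic(err)
    }
    fmt.Println("key is zero:", key.IsZero()) // false

    // Nonces round-trip through an unpadded base32 string form.
    var nonce storj.Nonce
    copy(nonce[:], "abcdefghijklmnopqrstuvwx") // NonceSize is 24 bytes
    decoded, err := storj.NonceFromString(nonce.String())
    if err != nil {
        panic(err)
    }
    fmt.Println("round-trip ok:", decoded == nonce) // true
}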

View file

@ -3,21 +3,21 @@
package storj package storj
// ListDirection specifies listing direction // ListDirection specifies listing direction.
type ListDirection int8 type ListDirection int8
const ( const (
// Before lists backwards from cursor, without cursor [NOT SUPPORTED] // Before lists backwards from cursor, without cursor [NOT SUPPORTED].
Before = ListDirection(-2) Before = ListDirection(-2)
// Backward lists backwards from cursor, including cursor [NOT SUPPORTED] // Backward lists backwards from cursor, including cursor [NOT SUPPORTED].
Backward = ListDirection(-1) Backward = ListDirection(-1)
// Forward lists forwards from cursor, including cursor // Forward lists forwards from cursor, including cursor.
Forward = ListDirection(1) Forward = ListDirection(1)
// After lists forwards from cursor, without cursor // After lists forwards from cursor, without cursor.
After = ListDirection(2) After = ListDirection(2)
) )
// ListOptions lists objects // ListOptions lists objects.
type ListOptions struct { type ListOptions struct {
Prefix Path Prefix Path
Cursor Path // Cursor is relative to Prefix, full path is Prefix + Cursor Cursor Path // Cursor is relative to Prefix, full path is Prefix + Cursor
@ -27,7 +27,7 @@ type ListOptions struct {
Limit int Limit int
} }
// ObjectList is a list of objects // ObjectList is a list of objects.
type ObjectList struct { type ObjectList struct {
Bucket string Bucket string
Prefix Path Prefix Path
@ -38,7 +38,7 @@ type ObjectList struct {
Items []Object Items []Object
} }
// NextPage returns options for listing the next page // NextPage returns options for listing the next page.
func (opts ListOptions) NextPage(list ObjectList) ListOptions { func (opts ListOptions) NextPage(list ObjectList) ListOptions {
if !list.More || len(list.Items) == 0 { if !list.More || len(list.Items) == 0 {
return ListOptions{} return ListOptions{}
@ -47,25 +47,27 @@ func (opts ListOptions) NextPage(list ObjectList) ListOptions {
return ListOptions{ return ListOptions{
Prefix: opts.Prefix, Prefix: opts.Prefix,
Cursor: list.Items[len(list.Items)-1].Path, Cursor: list.Items[len(list.Items)-1].Path,
Delimiter: opts.Delimiter,
Recursive: opts.Recursive,
Direction: After, Direction: After,
Limit: opts.Limit, Limit: opts.Limit,
} }
} }
// BucketListOptions lists objects // BucketListOptions lists objects.
type BucketListOptions struct { type BucketListOptions struct {
Cursor string Cursor string
Direction ListDirection Direction ListDirection
Limit int Limit int
} }
// BucketList is a list of buckets // BucketList is a list of buckets.
type BucketList struct { type BucketList struct {
More bool More bool
Items []Bucket Items []Bucket
} }
// NextPage returns options for listing the next page // NextPage returns options for listing the next page.
func (opts BucketListOptions) NextPage(list BucketList) BucketListOptions { func (opts BucketListOptions) NextPage(list BucketList) BucketListOptions {
if !list.More || len(list.Items) == 0 { if !list.More || len(list.Items) == 0 {
return BucketListOptions{} return BucketListOptions{}
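The NextPage change above now carries Delimiter and Recursive forward, so a paging loop keeps the same listing shape across pages while only the cursor advances. A sketch of such a loop; listBucket is a hypothetical stand-in for the call that actually fetches one page:

package main

import (
    "fmt"

    "storj.io/common/storj"
)

// listBucket is a hypothetical backend call returning one page of results for
// the given options; a real implementation would query the metainfo service.
func listBucket(opts storj.ListOptions) (storj.ObjectList, error) {
    return storj.ObjectList{More: false}, nil
}

func main() {
    opts := storj.ListOptions{
        Prefix:    "photos/",
        Recursive: true,
        Direction: storj.After,
        Limit:     100,
    }

    for {
        page, err := listBucket(opts)
        if err != nil {
            panic(err)
        }
        for _, item := range page.Items {
            fmt.Println(item.Path)
        }
        if !page.More {
            break
        }
        // NextPage keeps Prefix, Delimiter, Recursive and Limit, and moves the
        // cursor just past the last item of this page.
        opts = opts.NextPage(page)
    }
}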

50
vendor/storj.io/common/storj/node.go generated vendored
View file

@ -23,13 +23,13 @@ var (
ErrVersion = errs.Class("node ID version error") ErrVersion = errs.Class("node ID version error")
) )
// NodeIDSize is the byte length of a NodeID // NodeIDSize is the byte length of a NodeID.
const NodeIDSize = sha256.Size const NodeIDSize = sha256.Size
// NodeID is a unique node identifier // NodeID is a unique node identifier.
type NodeID [NodeIDSize]byte type NodeID [NodeIDSize]byte
// NodeIDList is a slice of NodeIDs (implements sort) // NodeIDList is a slice of NodeIDs (implements sort).
type NodeIDList []NodeID type NodeIDList []NodeID
// NewVersionedID adds an identity version to a node ID. // NewVersionedID adds an identity version to a node ID.
@ -42,7 +42,7 @@ func NewVersionedID(id NodeID, version IDVersion) NodeID {
} }
// NewVersionExt creates a new identity version certificate extension for the // NewVersionExt creates a new identity version certificate extension for the
// given identity version, // given identity version.
func NewVersionExt(version IDVersion) pkix.Extension { func NewVersionExt(version IDVersion) pkix.Extension {
return pkix.Extension{ return pkix.Extension{
Id: extensions.IdentityVersionExtID, Id: extensions.IdentityVersionExtID,
@ -50,7 +50,7 @@ func NewVersionExt(version IDVersion) pkix.Extension {
} }
} }
// NodeIDFromString decodes a base58check encoded node id string // NodeIDFromString decodes a base58check encoded node id string.
func NodeIDFromString(s string) (NodeID, error) { func NodeIDFromString(s string) (NodeID, error) {
idBytes, versionNumber, err := base58.CheckDecode(s) idBytes, versionNumber, err := base58.CheckDecode(s)
if err != nil { if err != nil {
@ -65,7 +65,7 @@ func NodeIDFromString(s string) (NodeID, error) {
return NewVersionedID(unversionedID, version), nil return NewVersionedID(unversionedID, version), nil
} }
// NodeIDsFromBytes converts a 2d byte slice into a list of nodes // NodeIDsFromBytes converts a 2d byte slice into a list of nodes.
func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) { func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) {
var idErrs []error var idErrs []error
for _, idBytes := range b { for _, idBytes := range b {
@ -84,7 +84,7 @@ func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) {
return ids, nil return ids, nil
} }
// NodeIDFromBytes converts a byte slice into a node id // NodeIDFromBytes converts a byte slice into a node id.
func NodeIDFromBytes(b []byte) (NodeID, error) { func NodeIDFromBytes(b []byte) (NodeID, error) {
bLen := len(b) bLen := len(b)
if bLen != len(NodeID{}) { if bLen != len(NodeID{}) {
@ -96,18 +96,18 @@ func NodeIDFromBytes(b []byte) (NodeID, error) {
return id, nil return id, nil
} }
// String returns NodeID as base58 encoded string with checksum and version bytes // String returns NodeID as base58 encoded string with checksum and version bytes.
func (id NodeID) String() string { func (id NodeID) String() string {
unversionedID := id.unversioned() unversionedID := id.unversioned()
return base58.CheckEncode(unversionedID[:], byte(id.Version().Number)) return base58.CheckEncode(unversionedID[:], byte(id.Version().Number))
} }
// IsZero returns whether NodeID is unassigned // IsZero returns whether NodeID is unassigned.
func (id NodeID) IsZero() bool { func (id NodeID) IsZero() bool {
return id == NodeID{} return id == NodeID{}
} }
// Bytes returns raw bytes of the id // Bytes returns raw bytes of the id.
func (id NodeID) Bytes() []byte { return id[:] } func (id NodeID) Bytes() []byte { return id[:] }
// Less returns whether id is smaller than other in lexicographic order. // Less returns whether id is smaller than other in lexicographic order.
@ -122,7 +122,7 @@ func (id NodeID) Less(other NodeID) bool {
return false return false
} }
// Version returns the version of the identity format // Version returns the version of the identity format.
func (id NodeID) Version() IDVersion { func (id NodeID) Version() IDVersion {
versionNumber := id.versionByte() versionNumber := id.versionByte()
if versionNumber == 0 { if versionNumber == 0 {
@ -138,7 +138,7 @@ func (id NodeID) Version() IDVersion {
return version return version
} }
// Difficulty returns the number of trailing zero bits in a node ID // Difficulty returns the number of trailing zero bits in a node ID.
func (id NodeID) Difficulty() (uint16, error) { func (id NodeID) Difficulty() (uint16, error) {
idLen := len(id) idLen := len(id)
var b byte var b byte
@ -161,18 +161,18 @@ func (id NodeID) Difficulty() (uint16, error) {
return 0, ErrNodeID.New("difficulty matches id hash length: %d; hash (hex): % x", idLen, id) return 0, ErrNodeID.New("difficulty matches id hash length: %d; hash (hex): % x", idLen, id)
} }
// Marshal serializes a node id // Marshal serializes a node id.
func (id NodeID) Marshal() ([]byte, error) { func (id NodeID) Marshal() ([]byte, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// MarshalTo serializes a node ID into the passed byte slice // MarshalTo serializes a node ID into the passed byte slice.
func (id *NodeID) MarshalTo(data []byte) (n int, err error) { func (id *NodeID) MarshalTo(data []byte) (n int, err error) {
n = copy(data, id.Bytes()) n = copy(data, id.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a node ID // Unmarshal deserializes a node ID.
func (id *NodeID) Unmarshal(data []byte) error { func (id *NodeID) Unmarshal(data []byte) error {
var err error var err error
*id, err = NodeIDFromBytes(data) *id, err = NodeIDFromBytes(data)
@ -192,22 +192,22 @@ func (id NodeID) unversioned() NodeID {
return unversionedID return unversionedID
} }
// Size returns the length of a node ID (implements gogo's custom type interface) // Size returns the length of a node ID (implements gogo's custom type interface).
func (id *NodeID) Size() int { func (id *NodeID) Size() int {
return len(id) return len(id)
} }
// MarshalJSON serializes a node ID to a json string as bytes // MarshalJSON serializes a node ID to a json string as bytes.
func (id NodeID) MarshalJSON() ([]byte, error) { func (id NodeID) MarshalJSON() ([]byte, error) {
return []byte(`"` + id.String() + `"`), nil return []byte(`"` + id.String() + `"`), nil
} }
// Value converts a NodeID to a database field // Value converts a NodeID to a database field.
func (id NodeID) Value() (driver.Value, error) { func (id NodeID) Value() (driver.Value, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// Scan extracts a NodeID from a database field // Scan extracts a NodeID from a database field.
func (id *NodeID) Scan(src interface{}) (err error) { func (id *NodeID) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {
@ -218,7 +218,7 @@ func (id *NodeID) Scan(src interface{}) (err error) {
return err return err
} }
// UnmarshalJSON deserializes a json string (as bytes) to a node ID // UnmarshalJSON deserializes a json string (as bytes) to a node ID.
func (id *NodeID) UnmarshalJSON(data []byte) error { func (id *NodeID) UnmarshalJSON(data []byte) error {
var unquoted string var unquoted string
err := json.Unmarshal(data, &unquoted) err := json.Unmarshal(data, &unquoted)
@ -233,7 +233,7 @@ func (id *NodeID) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// Strings returns a string slice of the node IDs // Strings returns a string slice of the node IDs.
func (n NodeIDList) Strings() []string { func (n NodeIDList) Strings() []string {
var strings []string var strings []string
for _, nid := range n { for _, nid := range n {
@ -242,7 +242,7 @@ func (n NodeIDList) Strings() []string {
return strings return strings
} }
// Bytes returns a 2d byte slice of the node IDs // Bytes returns a 2d byte slice of the node IDs.
func (n NodeIDList) Bytes() (idsBytes [][]byte) { func (n NodeIDList) Bytes() (idsBytes [][]byte) {
for _, nid := range n { for _, nid := range n {
idsBytes = append(idsBytes, nid.Bytes()) idsBytes = append(idsBytes, nid.Bytes())
@ -250,11 +250,11 @@ func (n NodeIDList) Bytes() (idsBytes [][]byte) {
return idsBytes return idsBytes
} }
// Len implements sort.Interface.Len() // Len implements sort.Interface.Len().
func (n NodeIDList) Len() int { return len(n) } func (n NodeIDList) Len() int { return len(n) }
// Swap implements sort.Interface.Swap() // Swap implements sort.Interface.Swap().
func (n NodeIDList) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (n NodeIDList) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
// Less implements sort.Interface.Less() // Less implements sort.Interface.Less().
func (n NodeIDList) Less(i, j int) bool { return n[i].Less(n[j]) } func (n NodeIDList) Less(i, j int) bool { return n[i].Less(n[j]) }
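NodeIDs are 32-byte values whose string form is base58check with a trailing identity-version byte, and NodeIDList implements sort.Interface. A small round-trip sketch:

package main

import (
    "fmt"
    "sort"

    "storj.io/common/storj"
)

func main() {
    // Build an ID from raw bytes; the length must equal NodeIDSize (32).
    raw := make([]byte, storj.NodeIDSize)
    raw[0] = 0xff
    id, err := storj.NodeIDFromBytes(raw)
    if err != nil {
        panic(err)
    }

    // String/NodeIDFromString round-trip through base58check.
    parsed, err := storj.NodeIDFromString(id.String())
    if err != nil {
        panic(err)
    }
    fmt.Println("round-trip ok:", parsed == id)

    // NodeIDList sorts lexicographically via sort.Interface.
    ids := storj.NodeIDList{id, storj.NodeID{}}
    sort.Sort(ids)
    fmt.Println("zero id sorts first:", ids[0].IsZero())
}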

View file

@ -70,7 +70,7 @@ func (url NodeURL) IsZero() bool {
return url == NodeURL{} return url == NodeURL{}
} }
// String converts NodeURL to a string // String converts NodeURL to a string.
func (url NodeURL) String() string { func (url NodeURL) String() string {
if url.ID.IsZero() { if url.ID.IsZero() {
return url.Address return url.Address
@ -78,7 +78,7 @@ func (url NodeURL) String() string {
return url.ID.String() + "@" + url.Address return url.ID.String() + "@" + url.Address
} }
// Set implements flag.Value interface // Set implements flag.Value interface.
func (url *NodeURL) Set(s string) error { func (url *NodeURL) Set(s string) error {
parsed, err := ParseNodeURL(s) parsed, err := ParseNodeURL(s)
if err != nil { if err != nil {
@ -89,13 +89,13 @@ func (url *NodeURL) Set(s string) error {
return nil return nil
} }
// Type implements pflag.Value // Type implements pflag.Value.
func (NodeURL) Type() string { return "storj.NodeURL" } func (NodeURL) Type() string { return "storj.NodeURL" }
// NodeURLs defines a comma delimited flag for defining a list node url-s. // NodeURLs defines a comma delimited flag for defining a list node url-s.
type NodeURLs []NodeURL type NodeURLs []NodeURL
// ParseNodeURLs parses comma delimited list of node urls // ParseNodeURLs parses comma delimited list of node urls.
func ParseNodeURLs(s string) (NodeURLs, error) { func ParseNodeURLs(s string) (NodeURLs, error) {
var urls NodeURLs var urls NodeURLs
if s == "" { if s == "" {
@ -113,7 +113,7 @@ func ParseNodeURLs(s string) (NodeURLs, error) {
return urls, nil return urls, nil
} }
// String converts NodeURLs to a string // String converts NodeURLs to a string.
func (urls NodeURLs) String() string { func (urls NodeURLs) String() string {
var xs []string var xs []string
for _, u := range urls { for _, u := range urls {
@ -122,7 +122,7 @@ func (urls NodeURLs) String() string {
return strings.Join(xs, ",") return strings.Join(xs, ",")
} }
// Set implements flag.Value interface // Set implements flag.Value interface.
func (urls *NodeURLs) Set(s string) error { func (urls *NodeURLs) Set(s string) error {
parsed, err := ParseNodeURLs(s) parsed, err := ParseNodeURLs(s)
if err != nil { if err != nil {
@ -133,5 +133,5 @@ func (urls *NodeURLs) Set(s string) error {
return nil return nil
} }
// Type implements pflag.Value // Type implements pflag.Value.
func (NodeURLs) Type() string { return "storj.NodeURLs" } func (NodeURLs) Type() string { return "storj.NodeURLs" }
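NodeURL pairs an optional node id with an address and plugs into flag/pflag via Set and Type; ParseNodeURLs handles comma-delimited lists. A short sketch using the id-less form (which, as the String method above suggests, is assumed to be accepted by the parser) so it does not depend on a real base58 node id:

package main

import (
    "fmt"

    "storj.io/common/storj"
)

func main() {
    // Without an "id@" prefix the URL is just an address.
    url, err := storj.ParseNodeURL("satellite.example.test:7777")
    if err != nil {
        panic(err)
    }
    fmt.Println("id is zero:", url.ID.IsZero()) // true
    fmt.Println("string form:", url.String())   // prints only the address

    // Comma-delimited lists parse into a NodeURLs value.
    urls, err := storj.ParseNodeURLs("a.example.test:7777,b.example.test:7777")
    if err != nil {
        panic(err)
    }
    fmt.Println("count:", len(urls), "joined:", urls.String())
}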

View file

@ -10,14 +10,14 @@ import (
) )
var ( var (
// ErrNoPath is an error class for using empty path // ErrNoPath is an error class for using empty path.
ErrNoPath = errs.Class("no path specified") ErrNoPath = errs.Class("no path specified")
// ErrObjectNotFound is an error class for non-existing object // ErrObjectNotFound is an error class for non-existing object.
ErrObjectNotFound = errs.Class("object not found") ErrObjectNotFound = errs.Class("object not found")
) )
// Object contains information about a specific object // Object contains information about a specific object.
type Object struct { type Object struct {
Version uint32 Version uint32
Bucket Bucket Bucket Bucket
@ -34,7 +34,7 @@ type Object struct {
Stream Stream
} }
// ObjectInfo contains information about a specific object // ObjectInfo contains information about a specific object.
type ObjectInfo struct { type ObjectInfo struct {
Version uint32 Version uint32
Bucket string Bucket string
@ -53,7 +53,7 @@ type ObjectInfo struct {
Stream Stream
} }
// Stream is information about an object stream // Stream is information about an object stream.
type Stream struct { type Stream struct {
ID StreamID ID StreamID
@ -76,15 +76,16 @@ type Stream struct {
LastSegment LastSegment // TODO: remove LastSegment LastSegment // TODO: remove
} }
// LastSegment contains info about last segment // LastSegment contains info about last segment.
// TODO: remove //
// TODO: remove.
type LastSegment struct { type LastSegment struct {
Size int64 Size int64
EncryptedKeyNonce Nonce EncryptedKeyNonce Nonce
EncryptedKey EncryptedPrivateKey EncryptedKey EncryptedPrivateKey
} }
// Segment is full segment information // Segment is full segment information.
type Segment struct { type Segment struct {
Index int64 Index int64
// Size is the size of the content in bytes // Size is the size of the content in bytes
@ -101,7 +102,7 @@ type Segment struct {
EncryptedKey EncryptedPrivateKey EncryptedKey EncryptedPrivateKey
} }
// Piece is information where a piece is located // Piece is information where a piece is located.
type Piece struct { type Piece struct {
Number byte Number byte
Location NodeID Location NodeID


@ -7,7 +7,7 @@ import (
"time" "time"
) )
// ObjectListItem represents listed object // ObjectListItem represents listed object.
type ObjectListItem struct { type ObjectListItem struct {
EncryptedPath []byte EncryptedPath []byte
Version int32 Version int32


@ -7,15 +7,15 @@ import (
"strings" "strings"
) )
// Path represents an object path // Path represents an object path.
type Path = string type Path = string
// SplitPath splits path into a slice of path components // SplitPath splits path into a slice of path components.
func SplitPath(path Path) []string { func SplitPath(path Path) []string {
return strings.Split(path, "/") return strings.Split(path, "/")
} }
// JoinPaths concatenates paths to a new single path // JoinPaths concatenates paths to a new single path.
func JoinPaths(paths ...Path) Path { func JoinPaths(paths ...Path) Path {
return strings.Join(paths, "/") return strings.Join(paths, "/")
} }
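
SplitPath and JoinPaths are plain "/" helpers, so a join followed by a split round-trips the components. A small sketch under the same assumed import path:

package main

import (
    "fmt"

    "storj.io/common/storj" // assumed import path
)

func main() {
    // JoinPaths concatenates with "/", SplitPath splits on "/".
    p := storj.JoinPaths("videos", "2020", "clip.mp4")
    fmt.Println(p)                  // videos/2020/clip.mp4
    fmt.Println(storj.SplitPath(p)) // [videos 2020 clip.mp4]
}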


@ -15,16 +15,16 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// ErrPieceID is used when something goes wrong with a piece ID // ErrPieceID is used when something goes wrong with a piece ID.
var ErrPieceID = errs.Class("piece ID error") var ErrPieceID = errs.Class("piece ID error")
// pieceIDEncoding is base32 without padding // pieceIDEncoding is base32 without padding.
var pieceIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) var pieceIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// PieceID is the unique identifier for pieces // PieceID is the unique identifier for pieces.
type PieceID [32]byte type PieceID [32]byte
// NewPieceID creates a piece ID // NewPieceID creates a piece ID.
func NewPieceID() PieceID { func NewPieceID() PieceID {
var id PieceID var id PieceID
@ -36,7 +36,7 @@ func NewPieceID() PieceID {
return id return id
} }
// PieceIDFromString decodes a hex encoded piece ID string // PieceIDFromString decodes a hex encoded piece ID string.
func PieceIDFromString(s string) (PieceID, error) { func PieceIDFromString(s string) (PieceID, error) {
idBytes, err := pieceIDEncoding.DecodeString(s) idBytes, err := pieceIDEncoding.DecodeString(s)
if err != nil { if err != nil {
@ -45,7 +45,7 @@ func PieceIDFromString(s string) (PieceID, error) {
return PieceIDFromBytes(idBytes) return PieceIDFromBytes(idBytes)
} }
// PieceIDFromBytes converts a byte slice into a piece ID // PieceIDFromBytes converts a byte slice into a piece ID.
func PieceIDFromBytes(b []byte) (PieceID, error) { func PieceIDFromBytes(b []byte) (PieceID, error) {
if len(b) != len(PieceID{}) { if len(b) != len(PieceID{}) {
return PieceID{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(PieceID{})) return PieceID{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(PieceID{}))
@ -56,18 +56,18 @@ func PieceIDFromBytes(b []byte) (PieceID, error) {
return id, nil return id, nil
} }
// IsZero returns whether piece ID is unassigned // IsZero returns whether piece ID is unassigned.
func (id PieceID) IsZero() bool { func (id PieceID) IsZero() bool {
return id == PieceID{} return id == PieceID{}
} }
// String representation of the piece ID // String representation of the piece ID.
func (id PieceID) String() string { return pieceIDEncoding.EncodeToString(id.Bytes()) } func (id PieceID) String() string { return pieceIDEncoding.EncodeToString(id.Bytes()) }
// Bytes returns bytes of the piece ID // Bytes returns bytes of the piece ID.
func (id PieceID) Bytes() []byte { return id[:] } func (id PieceID) Bytes() []byte { return id[:] }
// Derive a new PieceID from the current piece ID, the given storage node ID and piece number // Derive a new PieceID from the current piece ID, the given storage node ID and piece number.
func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID { func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID {
// TODO: should the secret / content be swapped? // TODO: should the secret / content be swapped?
mac := hmac.New(sha512.New, id.Bytes()) mac := hmac.New(sha512.New, id.Bytes())
@ -80,35 +80,35 @@ func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID {
return derived return derived
} }
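
Derive, shown above, HMACs the storage node ID and piece number with the current piece ID as the key, so each node gets its own piece ID from a shared root. A sketch of how it might be called (import path assumed; the zero NodeID stands in for a real node ID):

package main

import (
    "fmt"

    "storj.io/common/storj" // assumed import path
)

func main() {
    // A fresh random piece ID acts as the root for this segment.
    root := storj.NewPieceID()

    // In practice this would be the target storage node's ID.
    var node storj.NodeID

    for pieceNum := int32(0); pieceNum < 3; pieceNum++ {
        fmt.Println(root.Derive(node, pieceNum))
    }
}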
// Marshal serializes a piece ID // Marshal serializes a piece ID.
func (id PieceID) Marshal() ([]byte, error) { func (id PieceID) Marshal() ([]byte, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// MarshalTo serializes a piece ID into the passed byte slice // MarshalTo serializes a piece ID into the passed byte slice.
func (id *PieceID) MarshalTo(data []byte) (n int, err error) { func (id *PieceID) MarshalTo(data []byte) (n int, err error) {
n = copy(data, id.Bytes()) n = copy(data, id.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a piece ID // Unmarshal deserializes a piece ID.
func (id *PieceID) Unmarshal(data []byte) error { func (id *PieceID) Unmarshal(data []byte) error {
var err error var err error
*id, err = PieceIDFromBytes(data) *id, err = PieceIDFromBytes(data)
return err return err
} }
// Size returns the length of a piece ID (implements gogo's custom type interface) // Size returns the length of a piece ID (implements gogo's custom type interface).
func (id *PieceID) Size() int { func (id *PieceID) Size() int {
return len(id) return len(id)
} }
// MarshalJSON serializes a piece ID to a json string as bytes // MarshalJSON serializes a piece ID to a json string as bytes.
func (id PieceID) MarshalJSON() ([]byte, error) { func (id PieceID) MarshalJSON() ([]byte, error) {
return []byte(`"` + id.String() + `"`), nil return []byte(`"` + id.String() + `"`), nil
} }
// UnmarshalJSON deserializes a json string (as bytes) to a piece ID // UnmarshalJSON deserializes a json string (as bytes) to a piece ID.
func (id *PieceID) UnmarshalJSON(data []byte) error { func (id *PieceID) UnmarshalJSON(data []byte) error {
var unquoted string var unquoted string
err := json.Unmarshal(data, &unquoted) err := json.Unmarshal(data, &unquoted)
@ -123,12 +123,12 @@ func (id *PieceID) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// Value set a PieceID to a database field // Value set a PieceID to a database field.
func (id PieceID) Value() (driver.Value, error) { func (id PieceID) Value() (driver.Value, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// Scan extracts a PieceID from a database field // Scan extracts a PieceID from a database field.
func (id *PieceID) Scan(src interface{}) (err error) { func (id *PieceID) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {


@ -10,20 +10,20 @@ import (
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
) )
// ErrPieceKey is used when something goes wrong with a piece key // ErrPieceKey is used when something goes wrong with a piece key.
var ErrPieceKey = errs.Class("piece key error") var ErrPieceKey = errs.Class("piece key error")
// PiecePublicKey is the unique identifier for pieces // PiecePublicKey is the unique identifier for pieces.
type PiecePublicKey struct { type PiecePublicKey struct {
pub ed25519.PublicKey pub ed25519.PublicKey
} }
// PiecePrivateKey is the unique identifier for pieces // PiecePrivateKey is the unique identifier for pieces.
type PiecePrivateKey struct { type PiecePrivateKey struct {
priv ed25519.PrivateKey priv ed25519.PrivateKey
} }
// NewPieceKey creates a piece key pair // NewPieceKey creates a piece key pair.
func NewPieceKey() (PiecePublicKey, PiecePrivateKey, error) { func NewPieceKey() (PiecePublicKey, PiecePrivateKey, error) {
pub, priv, err := ed25519.GenerateKey(nil) pub, priv, err := ed25519.GenerateKey(nil)
@ -65,10 +65,10 @@ func (key PiecePublicKey) Verify(data, signature []byte) error {
return nil return nil
} }
// Bytes returns bytes of the piece public key // Bytes returns bytes of the piece public key.
func (key PiecePublicKey) Bytes() []byte { return key.pub[:] } func (key PiecePublicKey) Bytes() []byte { return key.pub[:] }
// Bytes returns bytes of the piece private key // Bytes returns bytes of the piece private key.
func (key PiecePrivateKey) Bytes() []byte { return key.priv[:] } func (key PiecePrivateKey) Bytes() []byte { return key.priv[:] }
// IsZero returns whether the key is empty. // IsZero returns whether the key is empty.
@ -77,25 +77,25 @@ func (key PiecePublicKey) IsZero() bool { return len(key.pub) == 0 }
// IsZero returns whether the key is empty. // IsZero returns whether the key is empty.
func (key PiecePrivateKey) IsZero() bool { return len(key.priv) == 0 } func (key PiecePrivateKey) IsZero() bool { return len(key.priv) == 0 }
// Marshal serializes a piece public key // Marshal serializes a piece public key.
func (key PiecePublicKey) Marshal() ([]byte, error) { return key.Bytes(), nil } func (key PiecePublicKey) Marshal() ([]byte, error) { return key.Bytes(), nil }
// Marshal serializes a piece private key // Marshal serializes a piece private key.
func (key PiecePrivateKey) Marshal() ([]byte, error) { return key.Bytes(), nil } func (key PiecePrivateKey) Marshal() ([]byte, error) { return key.Bytes(), nil }
// MarshalTo serializes a piece public key into the passed byte slice // MarshalTo serializes a piece public key into the passed byte slice.
func (key *PiecePublicKey) MarshalTo(data []byte) (n int, err error) { func (key *PiecePublicKey) MarshalTo(data []byte) (n int, err error) {
n = copy(data, key.Bytes()) n = copy(data, key.Bytes())
return n, nil return n, nil
} }
// MarshalTo serializes a piece private key into the passed byte slice // MarshalTo serializes a piece private key into the passed byte slice.
func (key *PiecePrivateKey) MarshalTo(data []byte) (n int, err error) { func (key *PiecePrivateKey) MarshalTo(data []byte) (n int, err error) {
n = copy(data, key.Bytes()) n = copy(data, key.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a piece public key // Unmarshal deserializes a piece public key.
func (key *PiecePublicKey) Unmarshal(data []byte) error { func (key *PiecePublicKey) Unmarshal(data []byte) error {
// allow empty keys // allow empty keys
if len(data) == 0 { if len(data) == 0 {
@ -107,7 +107,7 @@ func (key *PiecePublicKey) Unmarshal(data []byte) error {
return err return err
} }
// Unmarshal deserializes a piece private key // Unmarshal deserializes a piece private key.
func (key *PiecePrivateKey) Unmarshal(data []byte) error { func (key *PiecePrivateKey) Unmarshal(data []byte) error {
// allow empty keys // allow empty keys
if len(data) == 0 { if len(data) == 0 {
@ -122,21 +122,21 @@ func (key *PiecePrivateKey) Unmarshal(data []byte) error {
return err return err
} }
// Size returns the length of a piece public key (implements gogo's custom type interface) // Size returns the length of a piece public key (implements gogo's custom type interface).
func (key *PiecePublicKey) Size() int { return len(key.pub) } func (key *PiecePublicKey) Size() int { return len(key.pub) }
// Size returns the length of a piece private key (implements gogo's custom type interface) // Size returns the length of a piece private key (implements gogo's custom type interface).
func (key *PiecePrivateKey) Size() int { return len(key.priv) } func (key *PiecePrivateKey) Size() int { return len(key.priv) }
// Value set a PiecePublicKey to a database field // Value set a PiecePublicKey to a database field.
func (key PiecePublicKey) Value() (driver.Value, error) { func (key PiecePublicKey) Value() (driver.Value, error) {
return key.Bytes(), nil return key.Bytes(), nil
} }
// Value set a PiecePrivateKey to a database field // Value set a PiecePrivateKey to a database field.
func (key PiecePrivateKey) Value() (driver.Value, error) { return key.Bytes(), nil } func (key PiecePrivateKey) Value() (driver.Value, error) { return key.Bytes(), nil }
// Scan extracts a PiecePublicKey from a database field // Scan extracts a PiecePublicKey from a database field.
func (key *PiecePublicKey) Scan(src interface{}) (err error) { func (key *PiecePublicKey) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {
@ -147,7 +147,7 @@ func (key *PiecePublicKey) Scan(src interface{}) (err error) {
return err return err
} }
// Scan extracts a PiecePrivateKey from a database field // Scan extracts a PiecePrivateKey from a database field.
func (key *PiecePrivateKey) Scan(src interface{}) (err error) { func (key *PiecePrivateKey) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {


@ -3,7 +3,7 @@
package storj package storj
// RedundancyScheme specifies the parameters and the algorithm for redundancy // RedundancyScheme specifies the parameters and the algorithm for redundancy.
type RedundancyScheme struct { type RedundancyScheme struct {
// Algorithm determines the algorithm to be used for redundancy. // Algorithm determines the algorithm to be used for redundancy.
Algorithm RedundancyAlgorithm Algorithm RedundancyAlgorithm
@ -25,7 +25,7 @@ type RedundancyScheme struct {
TotalShares int16 TotalShares int16
} }
// IsZero returns true if no field in the struct is set to non-zero value // IsZero returns true if no field in the struct is set to non-zero value.
func (scheme RedundancyScheme) IsZero() bool { func (scheme RedundancyScheme) IsZero() bool {
return scheme == (RedundancyScheme{}) return scheme == (RedundancyScheme{})
} }
@ -59,10 +59,10 @@ func (scheme RedundancyScheme) DownloadNodes() int32 {
return needed return needed
} }
// RedundancyAlgorithm is the algorithm used for redundancy // RedundancyAlgorithm is the algorithm used for redundancy.
type RedundancyAlgorithm byte type RedundancyAlgorithm byte
// List of supported redundancy algorithms // List of supported redundancy algorithms.
const ( const (
InvalidRedundancyAlgorithm = RedundancyAlgorithm(iota) InvalidRedundancyAlgorithm = RedundancyAlgorithm(iota)
ReedSolomon ReedSolomon


@ -3,18 +3,18 @@
package storj package storj
// SegmentPosition segment position in object // SegmentPosition segment position in object.
type SegmentPosition struct { type SegmentPosition struct {
PartNumber int32 PartNumber int32
Index int32 Index int32
} }
// SegmentListItem represents listed segment // SegmentListItem represents listed segment.
type SegmentListItem struct { type SegmentListItem struct {
Position SegmentPosition Position SegmentPosition
} }
// SegmentDownloadInfo represents segment download information inline/remote // SegmentDownloadInfo represents segment download information inline/remote.
type SegmentDownloadInfo struct { type SegmentDownloadInfo struct {
SegmentID SegmentID SegmentID SegmentID
Size int64 Size int64
@ -25,7 +25,7 @@ type SegmentDownloadInfo struct {
SegmentEncryption SegmentEncryption SegmentEncryption SegmentEncryption
} }
// SegmentEncryption represents segment encryption key and nonce // SegmentEncryption represents segment encryption key and nonce.
type SegmentEncryption struct { type SegmentEncryption struct {
EncryptedKeyNonce Nonce EncryptedKeyNonce Nonce
EncryptedKey EncryptedPrivateKey EncryptedKey EncryptedPrivateKey


@ -9,16 +9,16 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// ErrSegmentID is used when something goes wrong with a segment ID // ErrSegmentID is used when something goes wrong with a segment ID.
var ErrSegmentID = errs.Class("segment ID error") var ErrSegmentID = errs.Class("segment ID error")
// segmentIDEncoding is base32 without padding // segmentIDEncoding is base32 without padding.
var segmentIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) var segmentIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// SegmentID is the unique identifier for segment related to object // SegmentID is the unique identifier for segment related to object.
type SegmentID []byte type SegmentID []byte
// SegmentIDFromString decodes a base32 encoded segment ID // SegmentIDFromString decodes a base32 encoded segment ID.
func SegmentIDFromString(s string) (SegmentID, error) { func SegmentIDFromString(s string) (SegmentID, error) {
idBytes, err := segmentIDEncoding.DecodeString(s) idBytes, err := segmentIDEncoding.DecodeString(s)
if err != nil { if err != nil {
@ -27,7 +27,7 @@ func SegmentIDFromString(s string) (SegmentID, error) {
return SegmentIDFromBytes(idBytes) return SegmentIDFromBytes(idBytes)
} }
// SegmentIDFromBytes converts a byte slice into a segment ID // SegmentIDFromBytes converts a byte slice into a segment ID.
func SegmentIDFromBytes(b []byte) (SegmentID, error) { func SegmentIDFromBytes(b []byte) (SegmentID, error) {
// the returned error will be used in a future implementation // the returned error will be used in a future implementation
id := make([]byte, len(b)) id := make([]byte, len(b))
@ -35,45 +35,45 @@ func SegmentIDFromBytes(b []byte) (SegmentID, error) {
return id, nil return id, nil
} }
// IsZero returns whether segment ID is unassigned // IsZero returns whether segment ID is unassigned.
func (id SegmentID) IsZero() bool { func (id SegmentID) IsZero() bool {
return len(id) == 0 return len(id) == 0
} }
// String representation of the segment ID // String representation of the segment ID.
func (id SegmentID) String() string { return segmentIDEncoding.EncodeToString(id.Bytes()) } func (id SegmentID) String() string { return segmentIDEncoding.EncodeToString(id.Bytes()) }
// Bytes returns bytes of the segment ID // Bytes returns bytes of the segment ID.
func (id SegmentID) Bytes() []byte { return id[:] } func (id SegmentID) Bytes() []byte { return id[:] }
// Marshal serializes a segment ID (implements gogo's custom type interface) // Marshal serializes a segment ID (implements gogo's custom type interface).
func (id SegmentID) Marshal() ([]byte, error) { func (id SegmentID) Marshal() ([]byte, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// MarshalTo serializes a segment ID into the passed byte slice (implements gogo's custom type interface) // MarshalTo serializes a segment ID into the passed byte slice (implements gogo's custom type interface).
func (id *SegmentID) MarshalTo(data []byte) (n int, err error) { func (id *SegmentID) MarshalTo(data []byte) (n int, err error) {
return copy(data, id.Bytes()), nil return copy(data, id.Bytes()), nil
} }
// Unmarshal deserializes a segment ID (implements gogo's custom type interface) // Unmarshal deserializes a segment ID (implements gogo's custom type interface).
func (id *SegmentID) Unmarshal(data []byte) error { func (id *SegmentID) Unmarshal(data []byte) error {
var err error var err error
*id, err = SegmentIDFromBytes(data) *id, err = SegmentIDFromBytes(data)
return err return err
} }
// Size returns the length of a segment ID (implements gogo's custom type interface) // Size returns the length of a segment ID (implements gogo's custom type interface).
func (id SegmentID) Size() int { func (id SegmentID) Size() int {
return len(id) return len(id)
} }
// MarshalJSON serializes a segment ID to a json string as bytes (implements gogo's custom type interface) // MarshalJSON serializes a segment ID to a json string as bytes (implements gogo's custom type interface).
func (id SegmentID) MarshalJSON() ([]byte, error) { func (id SegmentID) MarshalJSON() ([]byte, error) {
return []byte(`"` + id.String() + `"`), nil return []byte(`"` + id.String() + `"`), nil
} }
// UnmarshalJSON deserializes a json string (as bytes) to a segment ID (implements gogo's custom type interface) // UnmarshalJSON deserializes a json string (as bytes) to a segment ID (implements gogo's custom type interface).
func (id *SegmentID) UnmarshalJSON(data []byte) error { func (id *SegmentID) UnmarshalJSON(data []byte) error {
var err error var err error
*id, err = SegmentIDFromString(string(data)) *id, err = SegmentIDFromString(string(data))


@ -10,16 +10,16 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// ErrSerialNumber is used when something goes wrong with a serial number // ErrSerialNumber is used when something goes wrong with a serial number.
var ErrSerialNumber = errs.Class("serial number error") var ErrSerialNumber = errs.Class("serial number error")
// serialNumberEncoding is base32 without padding // serialNumberEncoding is base32 without padding.
var serialNumberEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) var serialNumberEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// SerialNumber is the unique identifier for pieces // SerialNumber is the unique identifier for pieces.
type SerialNumber [16]byte type SerialNumber [16]byte
// SerialNumberFromString decodes a base32 encoded serial number // SerialNumberFromString decodes a base32 encoded serial number.
func SerialNumberFromString(s string) (SerialNumber, error) { func SerialNumberFromString(s string) (SerialNumber, error) {
idBytes, err := serialNumberEncoding.DecodeString(s) idBytes, err := serialNumberEncoding.DecodeString(s)
if err != nil { if err != nil {
@ -28,7 +28,7 @@ func SerialNumberFromString(s string) (SerialNumber, error) {
return SerialNumberFromBytes(idBytes) return SerialNumberFromBytes(idBytes)
} }
// SerialNumberFromBytes converts a byte slice into a serial number // SerialNumberFromBytes converts a byte slice into a serial number.
func SerialNumberFromBytes(b []byte) (SerialNumber, error) { func SerialNumberFromBytes(b []byte) (SerialNumber, error) {
if len(b) != len(SerialNumber{}) { if len(b) != len(SerialNumber{}) {
return SerialNumber{}, ErrSerialNumber.New("not enough bytes to make a serial number; have %d, need %d", len(b), len(SerialNumber{})) return SerialNumber{}, ErrSerialNumber.New("not enough bytes to make a serial number; have %d, need %d", len(b), len(SerialNumber{}))
@ -39,7 +39,7 @@ func SerialNumberFromBytes(b []byte) (SerialNumber, error) {
return id, nil return id, nil
} }
// IsZero returns whether serial number is unassigned // IsZero returns whether serial number is unassigned.
func (id SerialNumber) IsZero() bool { func (id SerialNumber) IsZero() bool {
return id == SerialNumber{} return id == SerialNumber{}
} }
@ -56,41 +56,41 @@ func (id SerialNumber) Less(other SerialNumber) bool {
return false return false
} }
// String representation of the serial number // String representation of the serial number.
func (id SerialNumber) String() string { return serialNumberEncoding.EncodeToString(id.Bytes()) } func (id SerialNumber) String() string { return serialNumberEncoding.EncodeToString(id.Bytes()) }
// Bytes returns bytes of the serial number // Bytes returns bytes of the serial number.
func (id SerialNumber) Bytes() []byte { return id[:] } func (id SerialNumber) Bytes() []byte { return id[:] }
// Marshal serializes a serial number // Marshal serializes a serial number.
func (id SerialNumber) Marshal() ([]byte, error) { func (id SerialNumber) Marshal() ([]byte, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// MarshalTo serializes a serial number into the passed byte slice // MarshalTo serializes a serial number into the passed byte slice.
func (id *SerialNumber) MarshalTo(data []byte) (n int, err error) { func (id *SerialNumber) MarshalTo(data []byte) (n int, err error) {
n = copy(data, id.Bytes()) n = copy(data, id.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a serial number // Unmarshal deserializes a serial number.
func (id *SerialNumber) Unmarshal(data []byte) error { func (id *SerialNumber) Unmarshal(data []byte) error {
var err error var err error
*id, err = SerialNumberFromBytes(data) *id, err = SerialNumberFromBytes(data)
return err return err
} }
// Size returns the length of a serial number (implements gogo's custom type interface) // Size returns the length of a serial number (implements gogo's custom type interface).
func (id *SerialNumber) Size() int { func (id *SerialNumber) Size() int {
return len(id) return len(id)
} }
// MarshalJSON serializes a serial number to a json string as bytes // MarshalJSON serializes a serial number to a json string as bytes.
func (id SerialNumber) MarshalJSON() ([]byte, error) { func (id SerialNumber) MarshalJSON() ([]byte, error) {
return []byte(`"` + id.String() + `"`), nil return []byte(`"` + id.String() + `"`), nil
} }
// UnmarshalJSON deserializes a json string (as bytes) to a serial number // UnmarshalJSON deserializes a json string (as bytes) to a serial number.
func (id *SerialNumber) UnmarshalJSON(data []byte) error { func (id *SerialNumber) UnmarshalJSON(data []byte) error {
var err error var err error
*id, err = SerialNumberFromString(string(data)) *id, err = SerialNumberFromString(string(data))
@ -100,12 +100,12 @@ func (id *SerialNumber) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// Value set a SerialNumber to a database field // Value set a SerialNumber to a database field.
func (id SerialNumber) Value() (driver.Value, error) { func (id SerialNumber) Value() (driver.Value, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// Scan extracts a SerialNumber from a database field // Scan extracts a SerialNumber from a database field.
func (id *SerialNumber) Scan(src interface{}) (err error) { func (id *SerialNumber) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {


@ -10,16 +10,16 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// ErrStreamID is used when something goes wrong with a stream ID // ErrStreamID is used when something goes wrong with a stream ID.
var ErrStreamID = errs.Class("stream ID error") var ErrStreamID = errs.Class("stream ID error")
// streamIDEncoding is base32 without padding // streamIDEncoding is base32 without padding.
var streamIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) var streamIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// StreamID is the unique identifier for stream related to object // StreamID is the unique identifier for stream related to object.
type StreamID []byte type StreamID []byte
// StreamIDFromString decodes a base32 encoded stream ID // StreamIDFromString decodes a base32 encoded stream ID.
func StreamIDFromString(s string) (StreamID, error) { func StreamIDFromString(s string) (StreamID, error) {
idBytes, err := streamIDEncoding.DecodeString(s) idBytes, err := streamIDEncoding.DecodeString(s)
if err != nil { if err != nil {
@ -28,53 +28,53 @@ func StreamIDFromString(s string) (StreamID, error) {
return StreamIDFromBytes(idBytes) return StreamIDFromBytes(idBytes)
} }
// StreamIDFromBytes converts a byte slice into a stream ID // StreamIDFromBytes converts a byte slice into a stream ID.
func StreamIDFromBytes(b []byte) (StreamID, error) { func StreamIDFromBytes(b []byte) (StreamID, error) {
id := make([]byte, len(b)) id := make([]byte, len(b))
copy(id, b) copy(id, b)
return id, nil return id, nil
} }
// IsZero returns whether stream ID is unassigned // IsZero returns whether stream ID is unassigned.
func (id StreamID) IsZero() bool { func (id StreamID) IsZero() bool {
return len(id) == 0 return len(id) == 0
} }
// String representation of the stream ID // String representation of the stream ID.
func (id StreamID) String() string { return streamIDEncoding.EncodeToString(id.Bytes()) } func (id StreamID) String() string { return streamIDEncoding.EncodeToString(id.Bytes()) }
// Bytes returns bytes of the stream ID // Bytes returns bytes of the stream ID.
func (id StreamID) Bytes() []byte { return id[:] } func (id StreamID) Bytes() []byte { return id[:] }
// Marshal serializes a stream ID // Marshal serializes a stream ID.
func (id StreamID) Marshal() ([]byte, error) { func (id StreamID) Marshal() ([]byte, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// MarshalTo serializes a stream ID into the passed byte slice // MarshalTo serializes a stream ID into the passed byte slice.
func (id *StreamID) MarshalTo(data []byte) (n int, err error) { func (id *StreamID) MarshalTo(data []byte) (n int, err error) {
n = copy(data, id.Bytes()) n = copy(data, id.Bytes())
return n, nil return n, nil
} }
// Unmarshal deserializes a stream ID // Unmarshal deserializes a stream ID.
func (id *StreamID) Unmarshal(data []byte) error { func (id *StreamID) Unmarshal(data []byte) error {
var err error var err error
*id, err = StreamIDFromBytes(data) *id, err = StreamIDFromBytes(data)
return err return err
} }
// Size returns the length of a stream ID (implements gogo's custom type interface) // Size returns the length of a stream ID (implements gogo's custom type interface).
func (id StreamID) Size() int { func (id StreamID) Size() int {
return len(id) return len(id)
} }
// MarshalJSON serializes a stream ID to a json string as bytes // MarshalJSON serializes a stream ID to a json string as bytes.
func (id StreamID) MarshalJSON() ([]byte, error) { func (id StreamID) MarshalJSON() ([]byte, error) {
return []byte(`"` + id.String() + `"`), nil return []byte(`"` + id.String() + `"`), nil
} }
// UnmarshalJSON deserializes a json string (as bytes) to a stream ID // UnmarshalJSON deserializes a json string (as bytes) to a stream ID.
func (id *StreamID) UnmarshalJSON(data []byte) error { func (id *StreamID) UnmarshalJSON(data []byte) error {
var err error var err error
*id, err = StreamIDFromString(string(data)) *id, err = StreamIDFromString(string(data))
@ -84,12 +84,12 @@ func (id *StreamID) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// Value set a stream ID to a database field // Value set a stream ID to a database field.
func (id StreamID) Value() (driver.Value, error) { func (id StreamID) Value() (driver.Value, error) {
return id.Bytes(), nil return id.Bytes(), nil
} }
// Scan extracts a stream ID from a database field // Scan extracts a stream ID from a database field.
func (id *StreamID) Scan(src interface{}) (err error) { func (id *StreamID) Scan(src interface{}) (err error) {
b, ok := src.([]byte) b, ok := src.([]byte)
if !ok { if !ok {


@ -16,7 +16,7 @@ type readerFunc func(p []byte) (n int, err error)
func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
// Copy implements copying with cancellation // Copy implements copying with cancellation.
func Copy(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { func Copy(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
written, err = io.Copy(dst, readerFunc(func(p []byte) (int, error) { written, err = io.Copy(dst, readerFunc(func(p []byte) (int, error) {
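
Per the comment above, Copy behaves like io.Copy but honours cancellation by wrapping the source reader (the wrapped reader presumably checks the context between reads; its body is truncated in this hunk). A minimal sketch using the vendored storj.io/common/sync2 package:

package main

import (
    "bytes"
    "context"
    "fmt"
    "strings"
    "time"

    "storj.io/common/sync2"
)

func main() {
    // A context with a deadline; cancellation is expected to abort the
    // copy early via the context-aware reader wrapper above.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    var dst bytes.Buffer
    n, err := sync2.Copy(ctx, &dst, strings.NewReader("hello"))
    fmt.Println(n, err, dst.String())
}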


@ -169,7 +169,7 @@ func (cycle *Cycle) Close() {
close(cycle.control) close(cycle.control)
} }
// sendControl sends a control message // sendControl sends a control message.
func (cycle *Cycle) sendControl(message interface{}) { func (cycle *Cycle) sendControl(message interface{}) {
cycle.initialize() cycle.initialize()
select { select {
@ -178,7 +178,7 @@ func (cycle *Cycle) sendControl(message interface{}) {
} }
} }
// Stop stops the cycle permanently // Stop stops the cycle permanently.
func (cycle *Cycle) Stop() { func (cycle *Cycle) Stop() {
cycle.initialize() cycle.initialize()
if atomic.CompareAndSwapInt32(&cycle.stopsent, 0, 1) { if atomic.CompareAndSwapInt32(&cycle.stopsent, 0, 1) {


@ -17,14 +17,14 @@ type Fence struct {
done chan struct{} done chan struct{}
} }
// init sets up the initial lock into wait // init sets up the initial lock into wait.
func (fence *Fence) init() { func (fence *Fence) init() {
fence.setup.Do(func() { fence.setup.Do(func() {
fence.done = make(chan struct{}) fence.done = make(chan struct{})
}) })
} }
// Release releases everyone from Wait // Release releases everyone from Wait.
func (fence *Fence) Release() { func (fence *Fence) Release() {
fence.init() fence.init()
fence.release.Do(func() { close(fence.done) }) fence.release.Do(func() { close(fence.done) })

24
vendor/storj.io/common/sync2/io.go generated vendored

@ -9,32 +9,32 @@ import (
"sync/atomic" "sync/atomic"
) )
// ReadAtWriteAtCloser implements all io.ReaderAt, io.WriterAt and io.Closer // ReadAtWriteAtCloser implements all io.ReaderAt, io.WriterAt and io.Closer.
type ReadAtWriteAtCloser interface { type ReadAtWriteAtCloser interface {
io.ReaderAt io.ReaderAt
io.WriterAt io.WriterAt
io.Closer io.Closer
} }
// PipeWriter allows closing the writer with an error // PipeWriter allows closing the writer with an error.
type PipeWriter interface { type PipeWriter interface {
io.WriteCloser io.WriteCloser
CloseWithError(reason error) error CloseWithError(reason error) error
} }
// PipeReader allows closing the reader with an error // PipeReader allows closing the reader with an error.
type PipeReader interface { type PipeReader interface {
io.ReadCloser io.ReadCloser
CloseWithError(reason error) error CloseWithError(reason error) error
} }
// memory implements ReadAtWriteAtCloser on a memory buffer // memory implements ReadAtWriteAtCloser on a memory buffer.
type memory []byte type memory []byte
// Size returns size of memory buffer // Size returns size of memory buffer.
func (memory memory) Size() int { return len(memory) } func (memory memory) Size() int { return len(memory) }
// ReadAt implements io.ReaderAt methods // ReadAt implements io.ReaderAt methods.
func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) { func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) {
if at > int64(len(memory)) { if at > int64(len(memory)) {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
@ -43,7 +43,7 @@ func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) {
return amount, nil return amount, nil
} }
// WriteAt implements io.WriterAt methods // WriteAt implements io.WriterAt methods.
func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) { func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) {
if at > int64(len(memory)) { if at > int64(len(memory)) {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
@ -52,27 +52,27 @@ func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) {
return amount, nil return amount, nil
} }
// Close implements io.Closer // Close implements io.Closer.
func (memory memory) Close() error { return nil } func (memory memory) Close() error { return nil }
// offsetFile implements ReadAt, WriteAt offset to the file with reference counting // offsetFile implements ReadAt, WriteAt offset to the file with reference counting.
type offsetFile struct { type offsetFile struct {
file *os.File file *os.File
offset int64 offset int64
open *int64 // number of handles open open *int64 // number of handles open
} }
// ReadAt implements io.ReaderAt methods // ReadAt implements io.ReaderAt methods.
func (file offsetFile) ReadAt(data []byte, at int64) (amount int, err error) { func (file offsetFile) ReadAt(data []byte, at int64) (amount int, err error) {
return file.file.ReadAt(data, file.offset+at) return file.file.ReadAt(data, file.offset+at)
} }
// WriteAt implements io.WriterAt methods // WriteAt implements io.WriterAt methods.
func (file offsetFile) WriteAt(data []byte, at int64) (amount int, err error) { func (file offsetFile) WriteAt(data []byte, at int64) (amount int, err error) {
return file.file.WriteAt(data, file.offset+at) return file.file.WriteAt(data, file.offset+at)
} }
// Close implements io.Closer methods // Close implements io.Closer methods.
func (file offsetFile) Close() error { func (file offsetFile) Close() error {
if atomic.AddInt64(file.open, -1) == 0 { if atomic.AddInt64(file.open, -1) == 0 {
return file.file.Close() return file.file.Close()

26
vendor/storj.io/common/sync2/pipe.go generated vendored

@ -11,7 +11,7 @@ import (
"sync" "sync"
) )
// pipe is a io.Reader/io.Writer pipe backed by ReadAtWriteAtCloser // pipe is a io.Reader/io.Writer pipe backed by ReadAtWriteAtCloser.
type pipe struct { type pipe struct {
noCopy noCopy // nolint: structcheck noCopy noCopy // nolint: structcheck
@ -30,7 +30,7 @@ type pipe struct {
readerErr error readerErr error
} }
// NewPipeFile returns a pipe that uses file-system to offload memory // NewPipeFile returns a pipe that uses file-system to offload memory.
func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) { func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) {
tempfile, err := ioutil.TempFile(tempdir, "filepipe") tempfile, err := ioutil.TempFile(tempdir, "filepipe")
if err != nil { if err != nil {
@ -50,7 +50,7 @@ func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) {
return pipeReader{pipe}, pipeWriter{pipe}, nil return pipeReader{pipe}, pipeWriter{pipe}, nil
} }
// NewPipeMemory returns a pipe that uses an in-memory buffer // NewPipeMemory returns a pipe that uses an in-memory buffer.
func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) { func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) {
pipe := &pipe{ pipe := &pipe{
buffer: make(memory, pipeSize), buffer: make(memory, pipeSize),
@ -63,13 +63,13 @@ func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) {
type pipeReader struct{ pipe *pipe } type pipeReader struct{ pipe *pipe }
type pipeWriter struct{ pipe *pipe } type pipeWriter struct{ pipe *pipe }
// Close implements io.Reader Close // Close implements io.Reader Close.
func (reader pipeReader) Close() error { return reader.CloseWithError(nil) } func (reader pipeReader) Close() error { return reader.CloseWithError(nil) }
// Close implements io.Writer Close // Close implements io.Writer Close.
func (writer pipeWriter) Close() error { return writer.CloseWithError(nil) } func (writer pipeWriter) Close() error { return writer.CloseWithError(nil) }
// CloseWithError implements closing with error // CloseWithError implements closing with error.
func (reader pipeReader) CloseWithError(err error) error { func (reader pipeReader) CloseWithError(err error) error {
if err == nil { if err == nil {
err = io.ErrClosedPipe err = io.ErrClosedPipe
@ -88,7 +88,7 @@ func (reader pipeReader) CloseWithError(err error) error {
return pipe.buffer.Close() return pipe.buffer.Close()
} }
// CloseWithError implements closing with error // CloseWithError implements closing with error.
func (writer pipeWriter) CloseWithError(err error) error { func (writer pipeWriter) CloseWithError(err error) error {
if err == nil { if err == nil {
err = io.EOF err = io.EOF
@ -108,7 +108,7 @@ func (writer pipeWriter) CloseWithError(err error) error {
return pipe.buffer.Close() return pipe.buffer.Close()
} }
// Write writes to the pipe returning io.ErrClosedPipe when pipeSize is reached // Write writes to the pipe returning io.ErrClosedPipe when pipeSize is reached.
func (writer pipeWriter) Write(data []byte) (n int, err error) { func (writer pipeWriter) Write(data []byte) (n int, err error) {
pipe := writer.pipe pipe := writer.pipe
pipe.mu.Lock() pipe.mu.Lock()
@ -161,7 +161,7 @@ func (writer pipeWriter) Write(data []byte) (n int, err error) {
return writeAmount, err return writeAmount, err
} }
// Read reads from the pipe returning io.EOF when writer is closed or pipeSize is reached // Read reads from the pipe returning io.EOF when writer is closed or pipeSize is reached.
func (reader pipeReader) Read(data []byte) (n int, err error) { func (reader pipeReader) Read(data []byte) (n int, err error) {
pipe := reader.pipe pipe := reader.pipe
pipe.mu.Lock() pipe.mu.Lock()
@ -214,13 +214,13 @@ func (reader pipeReader) Read(data []byte) (n int, err error) {
return readAmount, err return readAmount, err
} }
// MultiPipe is a multipipe backed by a single file // MultiPipe is a multipipe backed by a single file.
type MultiPipe struct { type MultiPipe struct {
pipes []pipe pipes []pipe
} }
// NewMultiPipeFile returns a new MultiPipe that is created in tempdir // NewMultiPipeFile returns a new MultiPipe that is created in tempdir
// if tempdir == "" the file will be created in os.TempDir // if tempdir == "" the file will be created in os.TempDir.
func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, error) { func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, error) {
tempfile, err := ioutil.TempFile(tempdir, "multifilepipe") tempfile, err := ioutil.TempFile(tempdir, "multifilepipe")
if err != nil { if err != nil {
@ -255,7 +255,7 @@ func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, er
return multipipe, nil return multipipe, nil
} }
// NewMultiPipeMemory returns a new MultiPipe that is using a memory buffer // NewMultiPipeMemory returns a new MultiPipe that is using a memory buffer.
func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, error) { func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, error) {
buffer := make(memory, pipeCount*pipeSize) buffer := make(memory, pipeCount*pipeSize)
@ -273,7 +273,7 @@ func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, error) {
return multipipe, nil return multipipe, nil
} }
// Pipe returns the two ends of a block stream pipe // Pipe returns the two ends of a block stream pipe.
func (multipipe *MultiPipe) Pipe(index int) (PipeReader, PipeWriter) { func (multipipe *MultiPipe) Pipe(index int) (PipeReader, PipeWriter) {
pipe := &multipipe.pipes[index] pipe := &multipipe.pipes[index]
return pipeReader{pipe}, pipeWriter{pipe} return pipeReader{pipe}, pipeWriter{pipe}
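
NewPipeMemory, together with the Read/Write semantics documented above, gives a fixed-capacity single-producer/single-consumer pipe. A sketch of the round trip; the 1024-byte buffer size is arbitrary:

package main

import (
    "fmt"
    "io/ioutil"

    "storj.io/common/sync2"
)

func main() {
    // An in-memory pipe with room for 1024 bytes.
    reader, writer, err := sync2.NewPipeMemory(1024)
    if err != nil {
        panic(err)
    }

    // Write in a separate goroutine and close the writer so the reader
    // eventually sees io.EOF.
    go func() {
        _, _ = writer.Write([]byte("hello pipe"))
        _ = writer.Close()
    }()

    data, err := ioutil.ReadAll(reader)
    fmt.Println(string(data), err)
}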


@ -9,7 +9,7 @@ import (
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
) )
// Semaphore implements a closable semaphore // Semaphore implements a closable semaphore.
type Semaphore struct { type Semaphore struct {
noCopy noCopy // nolint: structcheck noCopy noCopy // nolint: structcheck


@ -8,7 +8,7 @@ import (
"time" "time"
) )
// Sleep implements sleeping with cancellation // Sleep implements sleeping with cancellation.
func Sleep(ctx context.Context, duration time.Duration) bool { func Sleep(ctx context.Context, duration time.Duration) bool {
timer := time.NewTimer(duration) timer := time.NewTimer(duration)
defer timer.Stop() defer timer.Stop()
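
Sleep is a context-aware time.Sleep: the bool return presumably reports true when the full duration elapses and false when the context is cancelled first (the rest of the body is truncated in this hunk). A hedged sketch:

package main

import (
    "context"
    "fmt"
    "time"

    "storj.io/common/sync2"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()

    // The context expires before the full second, so the assumed return
    // value here is false.
    if !sync2.Sleep(ctx, time.Second) {
        fmt.Println("context cancelled before the sleep finished")
    }
}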

14
vendor/storj.io/common/sync2/tee.go generated vendored

@ -28,7 +28,7 @@ type tee struct {
writerErr error writerErr error
} }
// NewTeeFile returns a tee that uses file-system to offload memory // NewTeeFile returns a tee that uses file-system to offload memory.
func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) { func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) {
file, err := tmpfile.New(tempdir, "tee") file, err := tmpfile.New(tempdir, "tee")
if err != nil { if err != nil {
@ -45,7 +45,7 @@ func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) {
return newTee(buffer, readers, &handles) return newTee(buffer, readers, &handles)
} }
// NewTeeInmemory returns a tee that uses inmemory // NewTeeInmemory returns a tee that uses inmemory.
func NewTeeInmemory(readers int, allocMemory int64) ([]PipeReader, PipeWriter, error) { func NewTeeInmemory(readers int, allocMemory int64) ([]PipeReader, PipeWriter, error) {
handles := int64(readers + 1) // +1 for the writer handles := int64(readers + 1) // +1 for the writer
memory := memory(make([]byte, allocMemory)) memory := memory(make([]byte, allocMemory))
@ -124,7 +124,7 @@ func (reader *teeReader) Read(data []byte) (n int, err error) {
return readAmount, err return readAmount, err
} }
// Write writes to the buffer returning io.ErrClosedPipe when limit is reached // Write writes to the buffer returning io.ErrClosedPipe when limit is reached.
// //
// It will block until at least one reader require the data. // It will block until at least one reader require the data.
func (writer *teeWriter) Write(data []byte) (n int, err error) { func (writer *teeWriter) Write(data []byte) (n int, err error) {
@ -163,13 +163,13 @@ func (writer *teeWriter) Write(data []byte) (n int, err error) {
return writeAmount, err return writeAmount, err
} }
// Close implements io.Reader Close // Close implements io.Reader Close.
func (reader *teeReader) Close() error { return reader.CloseWithError(nil) } func (reader *teeReader) Close() error { return reader.CloseWithError(nil) }
// Close implements io.Writer Close // Close implements io.Writer Close.
func (writer *teeWriter) Close() error { return writer.CloseWithError(nil) } func (writer *teeWriter) Close() error { return writer.CloseWithError(nil) }
// CloseWithError implements closing with error // CloseWithError implements closing with error.
func (reader *teeReader) CloseWithError(reason error) (err error) { func (reader *teeReader) CloseWithError(reason error) (err error) {
tee := reader.tee tee := reader.tee
if atomic.CompareAndSwapInt32(&reader.closed, 0, 1) { if atomic.CompareAndSwapInt32(&reader.closed, 0, 1) {
@ -183,7 +183,7 @@ func (reader *teeReader) CloseWithError(reason error) (err error) {
return err return err
} }
// CloseWithError implements closing with error // CloseWithError implements closing with error.
func (writer *teeWriter) CloseWithError(reason error) error { func (writer *teeWriter) CloseWithError(reason error) error {
if reason == nil { if reason == nil {
reason = io.EOF reason = io.EOF
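
NewTeeInmemory fans a single writer out to several readers over one shared buffer, and the Write documented above blocks until at least one reader needs the data, so the readers are started before writing. A sketch with two readers; the 1024-byte allocation is arbitrary:

package main

import (
    "fmt"
    "io/ioutil"
    "sync"

    "storj.io/common/sync2"
)

func main() {
    readers, writer, err := sync2.NewTeeInmemory(2, 1024)
    if err != nil {
        panic(err)
    }

    // Drain every reader concurrently; each one sees the same bytes.
    var wg sync.WaitGroup
    for i, r := range readers {
        wg.Add(1)
        go func(i int, r sync2.PipeReader) {
            defer wg.Done()
            data, _ := ioutil.ReadAll(r)
            fmt.Printf("reader %d got %q\n", i, data)
        }(i, r)
    }

    _, _ = writer.Write([]byte("broadcast"))
    _ = writer.Close()
    wg.Wait()
}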

Some files were not shown because too many files have changed in this diff