Merge branch 'release/1.1.0'

Leonard Lyubich 2020-06-18 17:26:15 +03:00
commit 2456521240
31 changed files with 3937 additions and 208 deletions


@ -1,6 +1,23 @@
# Changelog
This is the changelog for NeoFS-API-Go
## [1.1.0] - 2020-06-18
### Added
- `container.SetExtendedACL` rpc.
- `container.GetExtendedACL` rpc.
- Bearer token to all request messages.
- X-headers to all request messages.
### Changed
- Implementation and signatures of Sign/Verify request functions.
### Updated
- NeoFS API v1.0.0 => 1.1.0
## [1.0.0] - 2020-05-26
- Bump major release
@ -339,3 +356,4 @@ Initial public release
[0.7.5]: https://github.com/nspcc-dev/neofs-api-go/compare/v0.7.4...v0.7.5
[0.7.6]: https://github.com/nspcc-dev/neofs-api-go/compare/v0.7.5...v0.7.6
[1.0.0]: https://github.com/nspcc-dev/neofs-api-go/compare/v0.7.6...v1.0.0
[1.1.0]: https://github.com/nspcc-dev/neofs-api-go/compare/v1.0.0...v1.1.0


@ -1,4 +1,4 @@
-PROTO_VERSION=v1.0.0
+PROTO_VERSION=v1.1.0
PROTO_URL=https://github.com/nspcc-dev/neofs-api/archive/$(PROTO_VERSION).tar.gz
B=\033[0;1m B=\033[0;1m


@ -11,6 +11,12 @@
NeoFS API repository contains implementation of core NeoFS structures that
can be used for integration with NeoFS.
## Compatibility
[neofs-api v1.1.0]: https://github.com/nspcc-dev/neofs-api/releases/tag/v1.1.0
[neofs-api-go v1.1.0]: https://github.com/nspcc-dev/neofs-api-go/releases/tag/v1.1.0
[neofs-api-go v1.1.0] supports [neofs-api v1.1.0]
## Description
Repository contains 13 packages that implement NeoFS core structures. These


@ -13,7 +13,7 @@ func TestSignBalanceRequest(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
-service.SignedDataWithToken
+service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
@ -159,26 +159,26 @@ func TestSignBalanceRequest(t *testing.T) {
token := new(service.Token)
v.SetToken(token)
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
corruption(v)
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
}
}


@ -12,7 +12,7 @@ func TestRequestSign(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
-service.SignedDataWithToken
+service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
@ -56,26 +56,26 @@ func TestRequestSign(t *testing.T) {
token := new(service.Token)
v.SetToken(token)
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
corruption(v)
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
}
}
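The two test hunks above show the only call-site change most packages need: SignDataWithSessionToken/VerifyAccumulatedSignaturesWithToken become SignRequestData/VerifyRequestData, and the single signature now also covers the bearer token and X-headers added in this release. A minimal sketch of the new call pattern, assuming accounting.BalanceRequest is one of the request types exercised by the test above and that it satisfies the RequestData interface:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/nspcc-dev/neofs-api-go/accounting"
	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	// Throwaway key for the sketch; a real client signs with its identity key.
	sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	req := new(accounting.BalanceRequest)

	// One call now signs the request body together with the session token,
	// bearer token and extended (X-) headers carried in the meta header.
	if err := service.SignRequestData(sk, req); err != nil {
		log.Fatal(err)
	}

	// The verification counterpart checks every accumulated key/signature pair.
	if err := service.VerifyRequestData(req); err != nil {
		log.Fatal(err)
	}
}
```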

File diff suppressed because it is too large.


@ -27,6 +27,12 @@ service Service {
// List returns all user's containers
rpc List(ListRequest) returns (ListResponse);
// SetExtendedACL changes extended ACL rules of the container
rpc SetExtendedACL(SetExtendedACLRequest) returns (SetExtendedACLResponse);
// GetExtendedACL returns extended ACL rules of the container
rpc GetExtendedACL(GetExtendedACLRequest) returns (GetExtendedACLResponse);
}
message PutRequest {
@ -99,3 +105,42 @@ message ListResponse {
// CID (container id) is list of SHA256 hashes of the container structures
repeated bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message ExtendedACLKey {
// ID (container id) is a SHA256 hash of the container structure
bytes ID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message ExtendedACLValue {
// EACL carries binary representation of the table of extended ACL rules
bytes EACL = 1;
// Signature carries EACL field signature
bytes Signature = 2;
}
message SetExtendedACLRequest {
// Key carries key to extended ACL information
ExtendedACLKey Key = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// Value carries extended ACL information
ExtendedACLValue Value = 2 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message SetExtendedACLResponse {}
message GetExtendedACLRequest {
// Key carries key to extended ACL information
ExtendedACLKey Key = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetExtendedACLResponse {
// ACL carries extended ACL information
ExtendedACLValue ACL = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
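A rough usage sketch for the new eACL messages, based on the setters added later in this change set (SetID, SetEACL, SetSignature) and the new request signing helper; the CID value and eACL bytes are placeholders, and the gRPC client wiring is outside this diff:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/nspcc-dev/neofs-api-go/container"
	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	req := new(container.SetExtendedACLRequest)
	req.SetID(container.CID{})             // container to update (placeholder CID)
	req.SetEACL([]byte("<binary eACL>"))   // binary representation of the eACL table
	req.SetSignature([]byte("<eACL sig>")) // signature over the EACL field

	// The signed payload is ID || EACL || Signature, as implemented by
	// SetExtendedACLRequest.ReadSignedData further down in this commit.
	if err := service.SignRequestData(sk, req); err != nil {
		log.Fatal(err)
	}

	// The request would then be sent through the generated Service client
	// (SetExtendedACL rpc); client construction is not part of this diff.
}
```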


@ -135,3 +135,60 @@ func (m ListRequest) ReadSignedData(p []byte) (int, error) {
return off, nil
}
// SignedData returns payload bytes of the request.
func (m GetExtendedACLRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m GetExtendedACLRequest) SignedDataSize() int {
return m.GetID().Size()
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the Request size is insufficient, io.ErrUnexpectedEOF returns.
func (m GetExtendedACLRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
return off, nil
}
// SignedData returns payload bytes of the request.
func (m SetExtendedACLRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m SetExtendedACLRequest) SignedDataSize() int {
return 0 +
m.GetID().Size() +
len(m.GetEACL()) +
len(m.GetSignature())
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the Request size is insufficient, io.ErrUnexpectedEOF returns.
func (m SetExtendedACLRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
off += copy(p[off:], m.GetEACL())
off += copy(p[off:], m.GetSignature())
return off, nil
}


@ -12,7 +12,7 @@ func TestRequestSign(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
-service.SignedDataWithToken
+service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
@ -108,6 +108,50 @@ func TestRequestSign(t *testing.T) {
},
},
},
{ // GetExtendedACLRequest
constructor: func() sigType {
return new(GetExtendedACLRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*GetExtendedACLRequest)
id := req.GetID()
id[0]++
req.SetID(id)
},
},
},
{ // SetExtendedACLRequest
constructor: func() sigType {
return new(SetExtendedACLRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*SetExtendedACLRequest)
id := req.GetID()
id[0]++
req.SetID(id)
},
func(s sigType) {
req := s.(*SetExtendedACLRequest)
req.SetEACL(
append(req.GetEACL(), 1),
)
},
func(s sigType) {
req := s.(*SetExtendedACLRequest)
req.SetSignature(
append(req.GetSignature(), 1),
)
},
},
},
}
for _, item := range items {
@ -117,26 +161,26 @@ func TestRequestSign(t *testing.T) {
token := new(service.Token)
v.SetToken(token)
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
corruption(v)
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
}
}


@ -158,3 +158,23 @@ func (m ListRequest) GetOwnerID() OwnerID {
func (m *ListRequest) SetOwnerID(owner OwnerID) {
m.OwnerID = owner
}
// GetID is an ID field getter.
func (m ExtendedACLKey) GetID() CID {
return m.ID
}
// SetID is an ID field setter.
func (m *ExtendedACLKey) SetID(v CID) {
m.ID = v
}
// SetEACL is an EACL field setter.
func (m *ExtendedACLValue) SetEACL(v []byte) {
m.EACL = v
}
// SetSignature is a Signature field setter.
func (m *ExtendedACLValue) SetSignature(sig []byte) {
m.Signature = sig
}


@ -140,3 +140,23 @@ func TestListRequestGettersSetters(t *testing.T) {
require.Equal(t, owner, m.GetOwnerID())
})
}
func TestExtendedACLKey(t *testing.T) {
s := new(ExtendedACLKey)
id := CID{1, 2, 3}
s.SetID(id)
require.Equal(t, id, s.GetID())
}
func TestExtendedACLValue(t *testing.T) {
s := new(ExtendedACLValue)
acl := []byte{1, 2, 3}
s.SetEACL(acl)
require.Equal(t, acl, s.GetEACL())
sig := []byte{4, 5, 6}
s.SetSignature(sig)
require.Equal(t, sig, s.GetSignature())
}


@ -10,12 +10,18 @@
- Messages
- [DeleteRequest](#container.DeleteRequest)
- [DeleteResponse](#container.DeleteResponse)
- [ExtendedACLKey](#container.ExtendedACLKey)
- [ExtendedACLValue](#container.ExtendedACLValue)
- [GetExtendedACLRequest](#container.GetExtendedACLRequest)
- [GetExtendedACLResponse](#container.GetExtendedACLResponse)
- [GetRequest](#container.GetRequest)
- [GetResponse](#container.GetResponse)
- [ListRequest](#container.ListRequest)
- [ListResponse](#container.ListResponse)
- [PutRequest](#container.PutRequest)
- [PutResponse](#container.PutResponse)
- [SetExtendedACLRequest](#container.SetExtendedACLRequest)
- [SetExtendedACLResponse](#container.SetExtendedACLResponse)
- [container/types.proto](#container/types.proto)
@ -46,6 +52,8 @@ rpc Put(PutRequest) returns (PutResponse);
rpc Delete(DeleteRequest) returns (DeleteResponse);
rpc Get(GetRequest) returns (GetResponse);
rpc List(ListRequest) returns (ListResponse);
rpc SetExtendedACL(SetExtendedACLRequest) returns (SetExtendedACLResponse);
rpc GetExtendedACL(GetExtendedACLRequest) returns (GetExtendedACLResponse);
```
@ -80,6 +88,20 @@ List returns all user's containers
| Name | Input | Output |
| ---- | ----- | ------ |
| List | [ListRequest](#container.ListRequest) | [ListResponse](#container.ListResponse) |
#### Method SetExtendedACL
SetExtendedACL changes extended ACL rules of the container
| Name | Input | Output |
| ---- | ----- | ------ |
| SetExtendedACL | [SetExtendedACLRequest](#container.SetExtendedACLRequest) | [SetExtendedACLResponse](#container.SetExtendedACLResponse) |
#### Method GetExtendedACL
GetExtendedACL returns extended ACL rules of the container
| Name | Input | Output |
| ---- | ----- | ------ |
| GetExtendedACL | [GetExtendedACLRequest](#container.GetExtendedACLRequest) | [GetExtendedACLResponse](#container.GetExtendedACLResponse) |
<!-- end services -->
@ -104,6 +126,53 @@ via consensus in inner ring nodes
<a name="container.ExtendedACLKey"></a>
### Message ExtendedACLKey
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| ID | [bytes](#bytes) | | ID (container id) is a SHA256 hash of the container structure |
<a name="container.ExtendedACLValue"></a>
### Message ExtendedACLValue
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| EACL | [bytes](#bytes) | | EACL carries binary representation of the table of extended ACL rules |
| Signature | [bytes](#bytes) | | Signature carries EACL field signature |
<a name="container.GetExtendedACLRequest"></a>
### Message GetExtendedACLRequest
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| Key | [ExtendedACLKey](#container.ExtendedACLKey) | | Key carries key to extended ACL information |
| Meta | [service.RequestMetaHeader](#service.RequestMetaHeader) | | RequestMetaHeader contains information about request meta headers (should be embedded into message) |
| Verify | [service.RequestVerificationHeader](#service.RequestVerificationHeader) | | RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message) |
<a name="container.GetExtendedACLResponse"></a>
### Message GetExtendedACLResponse
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| ACL | [ExtendedACLValue](#container.ExtendedACLValue) | | ACL carries extended ACL information |
<a name="container.GetRequest"></a>
### Message GetRequest
@ -179,6 +248,27 @@ via consensus in inner ring nodes
| ----- | ---- | ----- | ----------- |
| CID | [bytes](#bytes) | | CID (container id) is a SHA256 hash of the container structure |
<a name="container.SetExtendedACLRequest"></a>
### Message SetExtendedACLRequest
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| Key | [ExtendedACLKey](#container.ExtendedACLKey) | | Key carries key to extended ACL information |
| Value | [ExtendedACLValue](#container.ExtendedACLValue) | | Value carries extended ACL information |
| Meta | [service.RequestMetaHeader](#service.RequestMetaHeader) | | RequestMetaHeader contains information about request meta headers (should be embedded into message) |
| Verify | [service.RequestVerificationHeader](#service.RequestVerificationHeader) | | RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message) |
<a name="container.SetExtendedACLResponse"></a>
### Message SetExtendedACLResponse
<!-- end messages -->
<!-- end enums -->


@ -6,6 +6,8 @@
- [service/meta.proto](#service/meta.proto)
- Messages
- [RequestExtendedHeader](#service.RequestExtendedHeader)
- [RequestExtendedHeader.KV](#service.RequestExtendedHeader.KV)
- [RequestMetaHeader](#service.RequestMetaHeader)
- [ResponseMetaHeader](#service.ResponseMetaHeader)
@ -13,6 +15,8 @@
- [service/verify.proto](#service/verify.proto)
- Messages
- [BearerTokenMsg](#service.BearerTokenMsg)
- [BearerTokenMsg.Info](#service.BearerTokenMsg.Info)
- [RequestVerificationHeader](#service.RequestVerificationHeader)
- [RequestVerificationHeader.Signature](#service.RequestVerificationHeader.Signature)
- [Token](#service.Token)
@ -39,6 +43,29 @@
<!-- end services -->
<a name="service.RequestExtendedHeader"></a>
### Message RequestExtendedHeader
RequestExtendedHeader contains extended headers of request
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| Headers | [RequestExtendedHeader.KV](#service.RequestExtendedHeader.KV) | repeated | Headers carries list of key-value headers |
<a name="service.RequestExtendedHeader.KV"></a>
### Message RequestExtendedHeader.KV
KV contains string key-value pair
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| K | [string](#string) | | K carries extended header key |
| V | [string](#string) | | V carries extended header value |
<a name="service.RequestMetaHeader"></a>
### Message RequestMetaHeader
@ -52,6 +79,7 @@ RequestMetaHeader contains information about request meta headers
| Epoch | [uint64](#uint64) | | Epoch for user can be empty, because node sets epoch to the actual value |
| Version | [uint32](#uint32) | | Version defines protocol version TODO: not used for now, should be implemented in future |
| Raw | [bool](#bool) | | Raw determines whether the request is raw or not |
| ExtendedHeader | [RequestExtendedHeader](#service.RequestExtendedHeader) | | ExtendedHeader carries extended headers of the request |
<a name="service.ResponseMetaHeader"></a>
@ -81,6 +109,32 @@ ResponseMetaHeader contains meta information based on request processing by serv
<!-- end services -->
<a name="service.BearerTokenMsg"></a>
### Message BearerTokenMsg
BearerTokenMsg carries information about request ACL rules with limited lifetime
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| TokenInfo | [BearerTokenMsg.Info](#service.BearerTokenMsg.Info) | | TokenInfo is a grouped information about token |
| OwnerKey | [bytes](#bytes) | | OwnerKey is a public key of the token owner |
| Signature | [bytes](#bytes) | | Signature is a signature of token information |
<a name="service.BearerTokenMsg.Info"></a>
### Message BearerTokenMsg.Info
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| ACLRules | [bytes](#bytes) | | ACLRules carries a binary representation of the table of extended ACL rules |
| OwnerID | [bytes](#bytes) | | OwnerID is an owner of token |
| ValidUntil | [uint64](#uint64) | | ValidUntil carries a last epoch of token lifetime |
<a name="service.RequestVerificationHeader"></a>
### Message RequestVerificationHeader
@ -92,6 +146,7 @@ RequestVerificationHeader is a set of signatures of every NeoFS Node that proces
| ----- | ---- | ----- | ----------- |
| Signatures | [RequestVerificationHeader.Signature](#service.RequestVerificationHeader.Signature) | repeated | Signatures is a set of signatures of every passed NeoFS Node |
| Token | [Token](#service.Token) | | Token is a token of the session within which the request is sent |
| Bearer | [BearerTokenMsg](#service.BearerTokenMsg) | | Bearer is a Bearer token of the request |
<a name="service.RequestVerificationHeader.Signature"></a>


@ -13,7 +13,7 @@ func TestSignVerifyRequests(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
-service.SignedDataWithToken
+service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*Token)
@ -164,26 +164,26 @@ func TestSignVerifyRequests(t *testing.T) {
token := new(Token)
v.SetToken(token)
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
-require.NoError(t, service.SignDataWithSessionToken(sk, v))
+require.NoError(t, service.SignRequestData(sk, v))
-require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.NoError(t, service.VerifyRequestData(v))
corruption(v)
-require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v))
+require.Error(t, service.VerifyRequestData(v))
}
}
}

service/bearer.go (new file, 140 lines)

@ -0,0 +1,140 @@
package service
import (
"crypto/ecdsa"
"io"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
)
type signedBearerToken struct {
BearerToken
}
type bearerMsgWrapper struct {
*BearerTokenMsg
}
const fixedBearerTokenDataSize = 0 +
refs.OwnerIDSize +
8
// NewSignedBearerToken wraps passed BearerToken in a component suitable for signing.
//
// Result can be used in AddSignatureWithKey function.
func NewSignedBearerToken(token BearerToken) DataWithSignKeyAccumulator {
return &signedBearerToken{
BearerToken: token,
}
}
// NewVerifiedBearerToken wraps passed BearerToken in a component suitable for signature verification.
//
// Result can be used in VerifySignatureWithKey function.
func NewVerifiedBearerToken(token BearerToken) DataWithSignature {
return &signedBearerToken{
BearerToken: token,
}
}
// AddSignKey calls a Signature field setter and an OwnerKey field setter with corresponding arguments.
func (s signedBearerToken) AddSignKey(sig []byte, key *ecdsa.PublicKey) {
if s.BearerToken != nil {
s.SetSignature(sig)
s.SetOwnerKey(
crypto.MarshalPublicKey(key),
)
}
}
// SignedData returns token information in a binary representation.
func (s signedBearerToken) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the length of signed token information slice.
func (s signedBearerToken) SignedDataSize() int {
return bearerTokenInfoSize(s.BearerToken)
}
// ReadSignedData copies a binary representation of the token information to passed buffer.
//
// If buffer length is less than required, io.ErrUnexpectedEOF returns.
func (s signedBearerToken) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
copyBearerTokenSignedData(p, s.BearerToken)
return sz, nil
}
func bearerTokenInfoSize(v ACLRulesSource) int {
if v == nil {
return 0
}
return fixedBearerTokenDataSize + len(v.GetACLRules())
}
// Fills passed buffer with signing token information bytes.
// Does not check buffer length, it is understood that enough space is allocated in it.
//
// If passed BearerTokenInfo is nil, buffer remains unchanged.
func copyBearerTokenSignedData(buf []byte, token BearerTokenInfo) {
if token == nil {
return
}
var off int
off += copy(buf[off:], token.GetACLRules())
off += copy(buf[off:], token.GetOwnerID().Bytes())
tokenEndianness.PutUint64(buf[off:], token.ExpirationEpoch())
off += 8
}
// SetACLRules is an ACLRules field setter.
func (m *BearerTokenMsg_Info) SetACLRules(v []byte) {
m.ACLRules = v
}
// SetValidUntil is a ValidUntil field setter.
func (m *BearerTokenMsg_Info) SetValidUntil(v uint64) {
m.ValidUntil = v
}
// GetOwnerID is an OwnerID field getter.
func (m BearerTokenMsg_Info) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *BearerTokenMsg_Info) SetOwnerID(v OwnerID) {
m.OwnerID = v
}
// ExpirationEpoch returns the result of ValidUntil field getter.
func (m BearerTokenMsg_Info) ExpirationEpoch() uint64 {
return m.GetValidUntil()
}
// SetExpirationEpoch passes argument to ValidUntil field setter.
func (m *BearerTokenMsg_Info) SetExpirationEpoch(v uint64) {
m.SetValidUntil(v)
}
// SetOwnerKey is an OwnerKey field setter.
func (m *BearerTokenMsg) SetOwnerKey(v []byte) {
m.OwnerKey = v
}
// SetSignature is a Signature field setter.
func (m *BearerTokenMsg) SetSignature(v []byte) {
m.Signature = v
}
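A hedged sketch of how these helpers could be used together with the BearerTokenMsg setters above; it mirrors the test file that follows and assumes *BearerTokenMsg satisfies the package's BearerToken interface, which the promoted setters and getters suggest:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	token := new(service.BearerTokenMsg)
	token.SetACLRules([]byte{1, 2, 3}) // placeholder eACL rules
	token.SetOwnerID(service.OwnerID{})
	token.SetExpirationEpoch(10)

	// Sign the token info (ACLRules || OwnerID || ValidUntil) with the owner key;
	// AddSignKey stores both the signature and the marshalled public key.
	if err := service.AddSignatureWithKey(sk, service.NewSignedBearerToken(token)); err != nil {
		log.Fatal(err)
	}

	// Verification reads the same byte layout back and checks the signature.
	if err := service.VerifySignatureWithKey(&sk.PublicKey, service.NewVerifiedBearerToken(token)); err != nil {
		log.Fatal(err)
	}
}
```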

service/bearer_test.go (new file, 199 lines)

@ -0,0 +1,199 @@
package service
import (
"crypto/rand"
"testing"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
type testBearerToken struct {
aclRules []byte
expEpoch uint64
owner OwnerID
key []byte
sig []byte
}
func (s testBearerToken) GetACLRules() []byte {
return s.aclRules
}
func (s *testBearerToken) SetACLRules(v []byte) {
s.aclRules = v
}
func (s testBearerToken) ExpirationEpoch() uint64 {
return s.expEpoch
}
func (s *testBearerToken) SetExpirationEpoch(v uint64) {
s.expEpoch = v
}
func (s testBearerToken) GetOwnerID() OwnerID {
return s.owner
}
func (s *testBearerToken) SetOwnerID(v OwnerID) {
s.owner = v
}
func (s testBearerToken) GetOwnerKey() []byte {
return s.key
}
func (s *testBearerToken) SetOwnerKey(v []byte) {
s.key = v
}
func (s testBearerToken) GetSignature() []byte {
return s.sig
}
func (s *testBearerToken) SetSignature(v []byte) {
s.sig = v
}
func TestBearerTokenMsgGettersSetters(t *testing.T) {
var tok BearerToken = new(testBearerToken)
{ // ACLRules
rules := []byte{1, 2, 3}
tok.SetACLRules(rules)
require.Equal(t, rules, tok.GetACLRules())
}
{ // OwnerID
ownerID := OwnerID{}
_, err := rand.Read(ownerID[:])
require.NoError(t, err)
tok.SetOwnerID(ownerID)
require.Equal(t, ownerID, tok.GetOwnerID())
}
{ // ValidUntil
e := uint64(5)
tok.SetExpirationEpoch(e)
require.Equal(t, e, tok.ExpirationEpoch())
}
{ // OwnerKey
key := make([]byte, 10)
_, err := rand.Read(key)
require.NoError(t, err)
tok.SetOwnerKey(key)
require.Equal(t, key, tok.GetOwnerKey())
}
{ // Signature
sig := make([]byte, 10)
_, err := rand.Read(sig)
require.NoError(t, err)
tok.SetSignature(sig)
require.Equal(t, sig, tok.GetSignature())
}
}
func TestSignVerifyBearerToken(t *testing.T) {
var token BearerToken = new(testBearerToken)
// create private key for signing
sk := test.DecodeKey(0)
pk := &sk.PublicKey
rules := []byte{1, 2, 3}
token.SetACLRules(rules)
ownerID := OwnerID{}
_, err := rand.Read(ownerID[:])
require.NoError(t, err)
token.SetOwnerID(ownerID)
fEpoch := uint64(2)
token.SetExpirationEpoch(fEpoch)
signedToken := NewSignedBearerToken(token)
verifiedToken := NewVerifiedBearerToken(token)
// sign and verify token
require.NoError(t, AddSignatureWithKey(sk, signedToken))
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
items := []struct {
corrupt func()
restore func()
}{
{ // ACLRules
corrupt: func() {
token.SetACLRules(append(rules, 1))
},
restore: func() {
token.SetACLRules(rules)
},
},
{ // Owner ID
corrupt: func() {
ownerID[0]++
token.SetOwnerID(ownerID)
},
restore: func() {
ownerID[0]--
token.SetOwnerID(ownerID)
},
},
{ // Expiration epoch
corrupt: func() {
token.SetExpirationEpoch(fEpoch + 1)
},
restore: func() {
token.SetExpirationEpoch(fEpoch)
},
},
}
for _, v := range items {
v.corrupt()
require.Error(t, VerifySignatureWithKey(pk, verifiedToken))
v.restore()
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
}
}
func TestBearerTokenMsg_Setters(t *testing.T) {
s := new(BearerTokenMsg)
aclRules := []byte{1, 2, 3}
s.SetACLRules(aclRules)
require.Equal(t, aclRules, s.GetACLRules())
validUntil := uint64(6)
s.SetValidUntil(validUntil)
require.Equal(t, validUntil, s.GetValidUntil())
s.SetExpirationEpoch(validUntil + 1)
require.Equal(t, validUntil+1, s.ExpirationEpoch())
ownerID := OwnerID{1, 2, 3}
s.SetOwnerID(ownerID)
require.Equal(t, ownerID, s.GetOwnerID())
ownerKey := []byte{4, 5, 6}
s.SetOwnerKey(ownerKey)
require.Equal(t, ownerKey, s.GetOwnerKey())
sig := []byte{7, 8, 9}
s.SetSignature(sig)
require.Equal(t, sig, s.GetSignature())
}


@ -36,14 +36,18 @@ const ErrEmptyDataWithSignature = internal.Error("empty data with signature")
// negative length for slice allocation.
const ErrNegativeLength = internal.Error("negative slice length")
-// ErrNilDataWithTokenSignAccumulator is returned by functions that expect
-// a non-nil DataWithTokenSignAccumulator, but received nil.
-const ErrNilDataWithTokenSignAccumulator = internal.Error("signed data with token is nil")
+// ErrNilRequestSignedData is returned by functions that expect
+// a non-nil RequestSignedData, but received nil.
+const ErrNilRequestSignedData = internal.Error("request signed data is nil")
-// ErrNilSignatureKeySourceWithToken is returned by functions that expect
-// a non-nil SignatureKeySourceWithToken, but received nil.
-const ErrNilSignatureKeySourceWithToken = internal.Error("key-signature source with token is nil")
+// ErrNilRequestVerifyData is returned by functions that expect
+// a non-nil RequestVerifyData, but received nil.
+const ErrNilRequestVerifyData = internal.Error("request verification data is nil")
// ErrNilSignedDataReader is returned by functions that expect
// a non-nil SignedDataReader, but received nil.
const ErrNilSignedDataReader = internal.Error("signed data reader is nil")
// ErrNilSignKeyPairAccumulator is returned by functions that expect
// a non-nil SignKeyPairAccumulator, but received nil.
const ErrNilSignKeyPairAccumulator = internal.Error("signature-key pair accumulator is nil")
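The renamed sentinel errors are what the new request helpers return on nil input (see the updated SignRequestData/VerifyRequestData later in this diff); a trivial sketch:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	sk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// A nil request is rejected before any payload is read.
	err := service.SignRequestData(sk, nil)
	fmt.Println(err == service.ErrNilRequestSignedData) // true

	err = service.VerifyRequestData(nil)
	fmt.Println(err == service.ErrNilRequestVerifyData) // true
}
```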


@ -1,5 +1,17 @@
package service
import (
"io"
)
type extHdrWrapper struct {
msg *RequestExtendedHeader_KV
}
type extHdrSrcWrapper struct {
extHdrSrc ExtendedHeadersSource
}
// CutMeta returns current value and sets RequestMetaHeader to empty value.
func (m *RequestMetaHeader) CutMeta() RequestMetaHeader {
cp := *m
@ -11,3 +23,109 @@ func (m *RequestMetaHeader) CutMeta() RequestMetaHeader {
func (m *RequestMetaHeader) RestoreMeta(v RequestMetaHeader) {
*m = v
}
// ExtendedHeadersSignedData wraps passed ExtendedHeadersSource and returns SignedDataSource.
func ExtendedHeadersSignedData(headers ExtendedHeadersSource) SignedDataSource {
return &extHdrSrcWrapper{
extHdrSrc: headers,
}
}
// SignedData returns extended headers in a binary representation.
func (s extHdrSrcWrapper) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the length of extended headers slice.
func (s extHdrSrcWrapper) SignedDataSize() (res int) {
if s.extHdrSrc != nil {
for _, h := range s.extHdrSrc.ExtendedHeaders() {
if h != nil {
res += len(h.Key()) + len(h.Value())
}
}
}
return
}
// ReadSignedData copies a binary representation of the extended headers to passed buffer.
//
// If buffer length is less than required, io.ErrUnexpectedEOF returns.
func (s extHdrSrcWrapper) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
if s.extHdrSrc != nil {
off := 0
for _, h := range s.extHdrSrc.ExtendedHeaders() {
if h == nil {
continue
}
off += copy(p[off:], []byte(h.Key()))
off += copy(p[off:], []byte(h.Value()))
}
}
return sz, nil
}
// SetK is a K field setter.
func (m *RequestExtendedHeader_KV) SetK(v string) {
m.K = v
}
// SetV is a V field setter.
func (m *RequestExtendedHeader_KV) SetV(v string) {
m.V = v
}
// SetHeaders is a Headers field setter.
func (m *RequestExtendedHeader) SetHeaders(v []RequestExtendedHeader_KV) {
m.Headers = v
}
func wrapExtendedHeaderKV(msg *RequestExtendedHeader_KV) extHdrWrapper {
return extHdrWrapper{
msg: msg,
}
}
// Key returns the result of K field getter.
//
// If message is nil, empty string returns.
func (m extHdrWrapper) Key() string {
if m.msg != nil {
return m.msg.GetK()
}
return ""
}
// Value returns the result of V field getter.
//
// If message is nil, empty string returns.
func (m extHdrWrapper) Value() string {
if m.msg != nil {
return m.msg.GetV()
}
return ""
}
// ExtendedHeaders composes ExtendedHeader list from the Headers field getter result.
func (m RequestExtendedHeader) ExtendedHeaders() []ExtendedHeader {
hs := m.GetHeaders()
res := make([]ExtendedHeader, 0, len(hs))
for i := range hs {
res = append(res, wrapExtendedHeaderKV(&hs[i]))
}
return res
}
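A small sketch of attaching X-headers to a request meta header with the setters above and exposing them for signing through ExtendedHeadersSignedData; the header key and value are illustrative:

```
package main

import (
	"fmt"
	"log"

	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	kv := service.RequestExtendedHeader_KV{}
	kv.SetK("X-Trace-ID") // illustrative header key
	kv.SetV("12345")      // illustrative header value

	hdr := service.RequestExtendedHeader{}
	hdr.SetHeaders([]service.RequestExtendedHeader_KV{kv})

	// The wrapper turns the headers into a SignedDataSource, so their bytes
	// become part of the payload covered by SignRequestData.
	src := service.ExtendedHeadersSignedData(hdr)

	data, err := src.SignedData()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%s\n", data) // keys and values concatenated in order: X-Trace-ID12345
}
```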


@ -35,6 +35,8 @@ type RequestMetaHeader struct {
Version uint32 `protobuf:"varint,3,opt,name=Version,proto3" json:"Version,omitempty"`
// Raw determines whether the request is raw or not
Raw bool `protobuf:"varint,4,opt,name=Raw,proto3" json:"Raw,omitempty"`
// ExtendedHeader carries extended headers of the request
RequestExtendedHeader `protobuf:"bytes,5,opt,name=ExtendedHeader,proto3,embedded=ExtendedHeader" json:"ExtendedHeader"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -153,32 +155,139 @@ func (m *ResponseMetaHeader) GetVersion() uint32 {
return 0
}
// RequestExtendedHeader contains extended headers of request
type RequestExtendedHeader struct {
// Headers carries list of key-value headers
Headers []RequestExtendedHeader_KV `protobuf:"bytes,1,rep,name=Headers,proto3" json:"Headers"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RequestExtendedHeader) Reset() { *m = RequestExtendedHeader{} }
func (m *RequestExtendedHeader) String() string { return proto.CompactTextString(m) }
func (*RequestExtendedHeader) ProtoMessage() {}
func (*RequestExtendedHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_a638867e7b43457c, []int{2}
}
func (m *RequestExtendedHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RequestExtendedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RequestExtendedHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_RequestExtendedHeader.Merge(m, src)
}
func (m *RequestExtendedHeader) XXX_Size() int {
return m.Size()
}
func (m *RequestExtendedHeader) XXX_DiscardUnknown() {
xxx_messageInfo_RequestExtendedHeader.DiscardUnknown(m)
}
var xxx_messageInfo_RequestExtendedHeader proto.InternalMessageInfo
func (m *RequestExtendedHeader) GetHeaders() []RequestExtendedHeader_KV {
if m != nil {
return m.Headers
}
return nil
}
// KV contains string key-value pair
type RequestExtendedHeader_KV struct {
// K carries extended header key
K string `protobuf:"bytes,1,opt,name=K,proto3" json:"K,omitempty"`
// V carries extended header value
V string `protobuf:"bytes,2,opt,name=V,proto3" json:"V,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RequestExtendedHeader_KV) Reset() { *m = RequestExtendedHeader_KV{} }
func (m *RequestExtendedHeader_KV) String() string { return proto.CompactTextString(m) }
func (*RequestExtendedHeader_KV) ProtoMessage() {}
func (*RequestExtendedHeader_KV) Descriptor() ([]byte, []int) {
return fileDescriptor_a638867e7b43457c, []int{2, 0}
}
func (m *RequestExtendedHeader_KV) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RequestExtendedHeader_KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RequestExtendedHeader_KV) XXX_Merge(src proto.Message) {
xxx_messageInfo_RequestExtendedHeader_KV.Merge(m, src)
}
func (m *RequestExtendedHeader_KV) XXX_Size() int {
return m.Size()
}
func (m *RequestExtendedHeader_KV) XXX_DiscardUnknown() {
xxx_messageInfo_RequestExtendedHeader_KV.DiscardUnknown(m)
}
var xxx_messageInfo_RequestExtendedHeader_KV proto.InternalMessageInfo
func (m *RequestExtendedHeader_KV) GetK() string {
if m != nil {
return m.K
}
return ""
}
func (m *RequestExtendedHeader_KV) GetV() string {
if m != nil {
return m.V
}
return ""
}
func init() {
proto.RegisterType((*RequestMetaHeader)(nil), "service.RequestMetaHeader")
proto.RegisterType((*ResponseMetaHeader)(nil), "service.ResponseMetaHeader")
proto.RegisterType((*RequestExtendedHeader)(nil), "service.RequestExtendedHeader")
proto.RegisterType((*RequestExtendedHeader_KV)(nil), "service.RequestExtendedHeader.KV")
}
func init() { proto.RegisterFile("service/meta.proto", fileDescriptor_a638867e7b43457c) }
var fileDescriptor_a638867e7b43457c = []byte{
// 261 bytes of a gzipped FileDescriptorProto // 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x4e, 0x2d, 0x2a, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x8e, 0xda, 0x40,
0xcb, 0x4c, 0x4e, 0xd5, 0xcf, 0x4d, 0x2d, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x10, 0x86, 0x19, 0x03, 0x01, 0x96, 0x24, 0x0a, 0xab, 0x44, 0xb2, 0x28, 0x8c, 0x43, 0xe5, 0x14,
0x87, 0x8a, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xb6, 0x25, 0xf2, 0x04, 0xa0, 0x10, 0x25, 0x72, 0x12, 0xa1, 0x05, 0xb9, 0x48, 0x67, 0xec, 0xc1,
0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xe5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0xe8, 0xb8, 0xc0, 0xeb, 0x78, 0x0d, 0x49, 0x91, 0x07, 0xc9, 0x33, 0xe4, 0x0d, 0xee, 0x0d, 0x28, 0x29,
0x53, 0x4a, 0xe7, 0x12, 0x0c, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0xf1, 0x4d, 0x2d, 0x49, 0xf4, 0xaf, 0x42, 0x27, 0xdf, 0x8b, 0x9c, 0xbc, 0x36, 0x27, 0x38, 0xdd, 0x5d, 0xf7, 0x7f, 0xb3, 0x33,
0x48, 0x4d, 0x4c, 0x49, 0x2d, 0x12, 0x12, 0xe0, 0x62, 0x0e, 0x09, 0xf1, 0x91, 0x60, 0x54, 0x60, 0x3b, 0x9f, 0x34, 0x84, 0x0a, 0x4c, 0x77, 0x91, 0x8f, 0xf6, 0x06, 0x33, 0xcf, 0x4a, 0x52, 0x9e,
0xd4, 0xe0, 0x0d, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0x5d, 0x0b, 0xf2, 0x93, 0x33, 0x24, 0x98, 0x71, 0xda, 0xaa, 0x6a, 0x7d, 0x33, 0x8c, 0xb2, 0xf5, 0x76, 0x69, 0xf9, 0x7c, 0x63, 0x87, 0x3c,
0x14, 0x18, 0x35, 0x58, 0x82, 0x20, 0x1c, 0x21, 0x09, 0x2e, 0xf6, 0xb0, 0xd4, 0xa2, 0xe2, 0xcc, 0xe4, 0xb6, 0x7c, 0x5f, 0x6e, 0x57, 0x92, 0x24, 0xc8, 0x54, 0xce, 0x0d, 0xaf, 0x80, 0xf4, 0x18,
0xfc, 0x3c, 0x09, 0x66, 0xb0, 0x5a, 0x18, 0x17, 0x64, 0x42, 0x50, 0x62, 0xb9, 0x04, 0x8b, 0x02, 0xfe, 0xda, 0xa2, 0xc8, 0xbe, 0x63, 0xe6, 0x7d, 0x41, 0x2f, 0xc0, 0x94, 0xbe, 0x21, 0xf5, 0xc5,
0xa3, 0x06, 0x47, 0x10, 0x88, 0xa9, 0xe4, 0xc2, 0x25, 0x14, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0xe2, 0x9b, 0x0a, 0x3a, 0x18, 0xaf, 0x58, 0x11, 0xe9, 0x5b, 0xd2, 0x9c, 0x26, 0xdc, 0x5f, 0xab,
0x9c, 0x8a, 0x64, 0x13, 0xdc, 0x5c, 0x46, 0x1c, 0xe6, 0x32, 0xa1, 0x98, 0xeb, 0x14, 0x7c, 0xe2, 0x8a, 0x0e, 0x46, 0x83, 0x95, 0x40, 0x55, 0xd2, 0x72, 0x31, 0x15, 0x11, 0x8f, 0xd5, 0xba, 0xec,
0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0x3d, 0x61, 0xf1, 0x03, 0xf3, 0x7e, 0xab, 0x0d, 0x1d, 0x8c, 0x36, 0x2b, 0x22, 0x9d, 0x91, 0xd7,
0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x26, 0x92, 0x9f, 0xf3, 0x8a, 0x0b, 0x92, 0x93, 0x75, 0x53, 0x52, 0xd3, 0x3f, 0x19, 0xc6, 0x01, 0x06, 0xe5, 0x16, 0xb5, 0xa9, 0x83, 0xd1, 0x1d, 0x69, 0x56, 0xa5,
0xcb, 0xf4, 0xf3, 0x52, 0xf3, 0xd3, 0x8a, 0x75, 0x13, 0x0b, 0x32, 0x75, 0xd3, 0xf3, 0xf5, 0xa1, 0x6e, 0x55, 0x1e, 0x97, 0x5d, 0x93, 0xf6, 0xfe, 0x38, 0xa8, 0x1d, 0x8e, 0x03, 0x60, 0x0f, 0xe6,
0xc1, 0xb3, 0x8a, 0x49, 0xd0, 0x2f, 0x35, 0xdf, 0x2d, 0x58, 0xcf, 0x31, 0xc0, 0x53, 0x2f, 0x18, 0x87, 0x9f, 0x08, 0x65, 0x28, 0x12, 0x1e, 0x0b, 0x3c, 0x73, 0xbf, 0x37, 0x85, 0x27, 0x4c, 0x95,
0x22, 0x96, 0xc4, 0x06, 0x0e, 0x0a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x12, 0x93, 0x0b, 0xd3, 0xe1, 0x5f, 0xf2, 0xee, 0xd1, 0xc5, 0x74, 0x4c, 0x5a, 0x65, 0x12, 0x2a, 0xe8, 0x75,
0x5e, 0x58, 0x01, 0x00, 0x00, 0xa3, 0x3b, 0x7a, 0xff, 0xbc, 0xa9, 0xe5, 0xb8, 0x93, 0x46, 0x21, 0xcb, 0x4e, 0x73, 0x7d, 0x9d,
0x28, 0x8e, 0x4b, 0x5f, 0x12, 0x70, 0xa4, 0x4d, 0x87, 0x81, 0x53, 0x90, 0x2b, 0x1d, 0x3a, 0x0c,
0xdc, 0xc9, 0x7c, 0x9f, 0x6b, 0x70, 0xc8, 0x35, 0xb8, 0xce, 0x35, 0xb8, 0xc9, 0x35, 0xf8, 0x77,
0xab, 0xd5, 0x7e, 0x7e, 0x38, 0x3b, 0x62, 0x2c, 0x12, 0xdf, 0x37, 0x03, 0xdc, 0xd9, 0x31, 0xf2,
0x95, 0x30, 0xbd, 0x24, 0x32, 0x43, 0x6e, 0x57, 0x2a, 0xff, 0x95, 0xde, 0x0f, 0xe4, 0x9f, 0xe7,
0xd6, 0x78, 0xf6, 0xd5, 0x9a, 0x97, 0xb5, 0xe5, 0x0b, 0x79, 0xdb, 0x8f, 0x77, 0x01, 0x00, 0x00,
0xff, 0xff, 0xf3, 0xf7, 0x0a, 0xb8, 0x29, 0x02, 0x00, 0x00,
} }
func (m *RequestMetaHeader) Marshal() (dAtA []byte, err error) {
@ -205,6 +314,16 @@ func (m *RequestMetaHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
{
size, err := m.RequestExtendedHeader.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMeta(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
if m.Raw {
i--
if m.Raw {
@ -270,6 +389,88 @@ func (m *ResponseMetaHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *RequestExtendedHeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RequestExtendedHeader) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RequestExtendedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Headers) > 0 {
for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMeta(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *RequestExtendedHeader_KV) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RequestExtendedHeader_KV) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RequestExtendedHeader_KV) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.V) > 0 {
i -= len(m.V)
copy(dAtA[i:], m.V)
i = encodeVarintMeta(dAtA, i, uint64(len(m.V)))
i--
dAtA[i] = 0x12
}
if len(m.K) > 0 {
i -= len(m.K)
copy(dAtA[i:], m.K)
i = encodeVarintMeta(dAtA, i, uint64(len(m.K)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintMeta(dAtA []byte, offset int, v uint64) int {
offset -= sovMeta(v)
base := offset
@ -299,6 +500,8 @@ func (m *RequestMetaHeader) Size() (n int) {
if m.Raw {
n += 2
}
l = m.RequestExtendedHeader.Size()
n += 1 + l + sovMeta(uint64(l))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@ -323,6 +526,44 @@ func (m *ResponseMetaHeader) Size() (n int) {
return n
}
func (m *RequestExtendedHeader) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Headers) > 0 {
for _, e := range m.Headers {
l = e.Size()
n += 1 + l + sovMeta(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *RequestExtendedHeader_KV) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.K)
if l > 0 {
n += 1 + l + sovMeta(uint64(l))
}
l = len(m.V)
if l > 0 {
n += 1 + l + sovMeta(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovMeta(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -435,6 +676,39 @@ func (m *RequestMetaHeader) Unmarshal(dAtA []byte) error {
}
}
m.Raw = bool(v != 0)
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestExtendedHeader", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMeta
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMeta
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.RequestExtendedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMeta(dAtA[iNdEx:])
@ -552,6 +826,212 @@ func (m *ResponseMetaHeader) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *RequestExtendedHeader) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: RequestExtendedHeader: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: RequestExtendedHeader: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMeta
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMeta
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Headers = append(m.Headers, RequestExtendedHeader_KV{})
if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMeta(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMeta
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthMeta
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *RequestExtendedHeader_KV) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KV: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field K", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMeta
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMeta
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.K = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field V", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMeta
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMeta
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.V = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMeta(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMeta
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthMeta
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMeta(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0


@ -19,6 +19,8 @@ message RequestMetaHeader {
uint32 Version = 3;
// Raw determines whether the request is raw or not
bool Raw = 4;
// ExtendedHeader carries extended headers of the request
RequestExtendedHeader ExtendedHeader = 5 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
// ResponseMetaHeader contains meta information based on request processing by server
@ -30,3 +32,18 @@ message ResponseMetaHeader {
// TODO: not used for now, should be implemented in future
uint32 Version = 2;
}
// RequestExtendedHeader contains extended headers of request
message RequestExtendedHeader {
// KV contains string key-value pair
message KV {
// K carries extended header key
string K = 1;
// V carries extended header value
string V = 2;
}
// Headers carries list of key-value headers
repeated KV Headers = 1 [(gogoproto.nullable) = false];
}


@ -23,3 +23,77 @@ func TestCutRestoreMeta(t *testing.T) {
require.Equal(t, item(), v1)
}
}
func TestRequestExtendedHeader_KV_Setters(t *testing.T) {
s := new(RequestExtendedHeader_KV)
key := "key"
s.SetK(key)
require.Equal(t, key, s.GetK())
val := "val"
s.SetV(val)
require.Equal(t, val, s.GetV())
}
func TestRequestExtendedHeader_SetHeaders(t *testing.T) {
s := new(RequestExtendedHeader)
hdr := RequestExtendedHeader_KV{}
hdr.SetK("key")
hdr.SetV("val")
hdrs := []RequestExtendedHeader_KV{
hdr,
}
s.SetHeaders(hdrs)
require.Equal(t, hdrs, s.GetHeaders())
}
func TestExtHdrWrapper(t *testing.T) {
s := wrapExtendedHeaderKV(nil)
require.Empty(t, s.Key())
require.Empty(t, s.Value())
msg := new(RequestExtendedHeader_KV)
s = wrapExtendedHeaderKV(msg)
key := "key"
msg.SetK(key)
require.Equal(t, key, s.Key())
val := "val"
msg.SetV(val)
require.Equal(t, val, s.Value())
}
func TestRequestExtendedHeader_ExtendedHeaders(t *testing.T) {
var (
k1, v1 = "key1", "value1"
k2, v2 = "key2", "value2"
h1 = new(RequestExtendedHeader_KV)
h2 = new(RequestExtendedHeader_KV)
)
h1.SetK(k1)
h1.SetV(v1)
h2.SetK(k2)
h2.SetV(v2)
s := new(RequestExtendedHeader)
s.SetHeaders([]RequestExtendedHeader_KV{
*h1, *h2,
})
xHdrs := s.ExtendedHeaders()
require.Len(t, xHdrs, 2)
require.Equal(t, k1, xHdrs[0].Key())
require.Equal(t, v1, xHdrs[0].Value())
require.Equal(t, k2, xHdrs[1].Key())
require.Equal(t, v2, xHdrs[1].Value())
}


@ -2,9 +2,11 @@ package service
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"io"
"sync" "sync"
crypto "github.com/nspcc-dev/neofs-crypto" crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/pkg/errors"
) )
type keySign struct { type keySign struct {
@ -12,6 +14,20 @@ type keySign struct {
sign []byte sign []byte
} }
type signSourceGroup struct {
SignKeyPairSource
SignKeyPairAccumulator
sources []SignedDataSource
}
type signReadersGroup struct {
SignKeyPairSource
SignKeyPairAccumulator
readers []SignedDataReader
}
var bytesPool = sync.Pool{ var bytesPool = sync.Pool{
New: func() interface{} { New: func() interface{} {
return make([]byte, 5<<20) return make([]byte, 5<<20)
@ -176,54 +192,199 @@ func VerifySignatureWithKey(key *ecdsa.PublicKey, src DataWithSignature) error {
) )
} }
// SignDataWithSessionToken calculates data with token signature and adds it to accumulator. // SignRequestData calculates request data signature and adds it to accumulator.
// //
// Any change of data or session token info provokes signature breakdown. // Any change of request data provokes signature breakdown.
// //
// If passed private key is nil, crypto.ErrEmptyPrivateKey returns. // If passed private key is nil, crypto.ErrEmptyPrivateKey returns.
// If passed DataWithTokenSignAccumulator is nil, ErrNilDataWithTokenSignAccumulator returns. // If passed RequestSignedData is nil, ErrNilRequestSignedData returns.
func SignDataWithSessionToken(key *ecdsa.PrivateKey, src DataWithTokenSignAccumulator) error { func SignRequestData(key *ecdsa.PrivateKey, src RequestSignedData) error {
if src == nil { if src == nil {
return ErrNilDataWithTokenSignAccumulator return ErrNilRequestSignedData
} else if r, ok := src.(SignedDataReader); ok { }
return AddSignatureWithKey(key, &signDataReaderWithToken{
SignedDataSource: src,
SignKeyPairAccumulator: src,
rdr: r, sigSrc, err := GroupSignedPayloads(
token: src.GetSessionToken(), src,
}, src,
NewSignedSessionToken(
src.GetSessionToken(),
),
NewSignedBearerToken(
src.GetBearerToken(),
),
ExtendedHeadersSignedData(src),
) )
if err != nil {
return err
} }
return AddSignatureWithKey(key, &signAccumWithToken{ return AddSignatureWithKey(key, sigSrc)
SignedDataSource: src,
SignKeyPairAccumulator: src,
token: src.GetSessionToken(),
})
} }
// VerifyAccumulatedSignaturesWithToken checks if accumulated key-signature pairs of data with token are valid. // VerifyRequestData checks if accumulated key-signature pairs of data with token are valid.
// //
// If passed DataWithTokenSignSource is nil, ErrNilSignatureKeySourceWithToken returns. // If passed RequestVerifyData is nil, ErrNilRequestVerifyData returns.
func VerifyAccumulatedSignaturesWithToken(src DataWithTokenSignSource) error { func VerifyRequestData(src RequestVerifyData) error {
if src == nil { if src == nil {
return ErrNilSignatureKeySourceWithToken return ErrNilRequestVerifyData
} else if r, ok := src.(SignedDataReader); ok {
return VerifyAccumulatedSignatures(&signDataReaderWithToken{
SignedDataSource: src,
SignKeyPairSource: src,
rdr: r,
token: src.GetSessionToken(),
})
} }
return VerifyAccumulatedSignatures(&signAccumWithToken{ verSrc, err := GroupVerifyPayloads(
SignedDataSource: src, src,
SignKeyPairSource: src, src,
NewVerifiedSessionToken(
src.GetSessionToken(),
),
NewVerifiedBearerToken(
src.GetBearerToken(),
),
ExtendedHeadersSignedData(src),
)
if err != nil {
return err
}
token: src.GetSessionToken(), return VerifyAccumulatedSignatures(verSrc)
}) }
// SignedData returns payload bytes concatenation from all sources keeping order.
func (s signSourceGroup) SignedData() ([]byte, error) {
chunks := make([][]byte, 0, len(s.sources))
sz := 0
for i := range s.sources {
data, err := s.sources[i].SignedData()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed payload of element #%d", i)
}
chunks = append(chunks, data)
sz += len(data)
}
res := make([]byte, sz)
off := 0
for i := range chunks {
off += copy(res[off:], chunks[i])
}
return res, nil
}
// SignedData returns payload bytes concatenation from all readers.
func (s signReadersGroup) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the sum of sizes of all readers.
func (s signReadersGroup) SignedDataSize() (sz int) {
for i := range s.readers {
sz += s.readers[i].SignedDataSize()
}
return
}
// ReadSignedData reads data from all readers to passed buffer keeping order.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (s signReadersGroup) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
off := 0
for i := range s.readers {
n, err := s.readers[i].ReadSignedData(p[off:])
off += n
if err != nil {
return off, errors.Wrapf(err, "could not read signed payload of element #%d", i)
}
}
return off, nil
}
// GroupSignedPayloads groups SignKeyPairAccumulator and SignedDataSource list to DataWithSignKeyAccumulator.
//
// If passed SignKeyPairAccumulator is nil, ErrNilSignKeyPairAccumulator returns.
//
// Signed payload of the result is a concatenation of payloads of list elements keeping order.
// Nil elements in list are ignored.
//
// If all elements implement SignedDataReader, result implements it too.
func GroupSignedPayloads(acc SignKeyPairAccumulator, sources ...SignedDataSource) (DataWithSignKeyAccumulator, error) {
if acc == nil {
return nil, ErrNilSignKeyPairAccumulator
}
return groupPayloads(acc, nil, sources...), nil
}
// GroupVerifyPayloads groups SignKeyPairSource and SignedDataSource list to DataWithSignKeySource.
//
// If passed SignKeyPairSource is nil, ErrNilSignatureKeySource returns.
//
// Signed payload of the result is a concatenation of payloads of list elements keeping order.
// Nil elements in list are ignored.
//
// If all elements implement SignedDataReader, result implements it too.
func GroupVerifyPayloads(src SignKeyPairSource, sources ...SignedDataSource) (DataWithSignKeySource, error) {
if src == nil {
return nil, ErrNilSignatureKeySource
}
return groupPayloads(nil, src, sources...), nil
}
func groupPayloads(acc SignKeyPairAccumulator, src SignKeyPairSource, sources ...SignedDataSource) interface {
SignedDataSource
SignKeyPairSource
SignKeyPairAccumulator
} {
var allReaders bool
for i := range sources {
if sources[i] == nil {
continue
} else if _, allReaders = sources[i].(SignedDataReader); !allReaders {
break
}
}
if !allReaders {
res := &signSourceGroup{
SignKeyPairSource: src,
SignKeyPairAccumulator: acc,
sources: make([]SignedDataSource, 0, len(sources)),
}
for i := range sources {
if sources[i] != nil {
res.sources = append(res.sources, sources[i])
}
}
return res
}
res := &signReadersGroup{
SignKeyPairSource: src,
SignKeyPairAccumulator: acc,
readers: make([]SignedDataReader, 0, len(sources)),
}
for i := range sources {
if sources[i] != nil {
res.readers = append(res.readers, sources[i].(SignedDataReader))
}
}
return res
} }
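Put differently, GroupSignedPayloads/GroupVerifyPayloads reduce signing to a single byte slice: payloads of the non-nil sources are concatenated in the order they are passed, which for SignRequestData above means request body, session token, Bearer token, then X-headers. A self-contained sketch of that concatenation with illustrative types (not the package's own):

package main

import "fmt"

// signedDataSource mirrors the package's SignedDataSource for illustration.
type signedDataSource interface {
	SignedData() ([]byte, error)
}

type rawSource []byte

func (s rawSource) SignedData() ([]byte, error) { return s, nil }

// concatPayloads reproduces what signSourceGroup.SignedData does:
// nil sources are skipped, the rest are concatenated keeping order.
func concatPayloads(sources ...signedDataSource) ([]byte, error) {
	var res []byte
	for i, src := range sources {
		if src == nil {
			continue
		}
		data, err := src.SignedData()
		if err != nil {
			return nil, fmt.Errorf("element #%d: %w", i, err)
		}
		res = append(res, data...)
	}
	return res, nil
}

func main() {
	payload, _ := concatPayloads(
		rawSource("body"),          // request payload
		rawSource("session token"), // NewSignedSessionToken(...)
		rawSource("bearer token"),  // NewSignedBearerToken(...)
		rawSource("x-headers"),     // ExtendedHeadersSignedData(...)
	)
	fmt.Printf("%s\n", payload) // the bytes the private key actually signs
}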

View file

@ -18,6 +18,10 @@ type testSignedDataSrc struct {
sig []byte sig []byte
key *ecdsa.PublicKey key *ecdsa.PublicKey
token SessionToken token SessionToken
bearer BearerToken
extHdrs []ExtendedHeader
} }
type testSignedDataReader struct { type testSignedDataReader struct {
@ -54,6 +58,14 @@ func (s testSignedDataSrc) GetSessionToken() SessionToken {
return s.token return s.token
} }
func (s testSignedDataSrc) GetBearerToken() BearerToken {
return s.bearer
}
func (s testSignedDataSrc) ExtendedHeaders() []ExtendedHeader {
return s.extHdrs
}
func (s testSignedDataReader) SignedDataSize() int { func (s testSignedDataReader) SignedDataSize() int {
return len(s.data) return len(s.data)
} }
@ -256,47 +268,63 @@ func TestVerifySignatureWithKey(t *testing.T) {
require.Error(t, VerifySignatureWithKey(&sk.PublicKey, src)) require.Error(t, VerifySignatureWithKey(&sk.PublicKey, src))
} }
func TestSignVerifyDataWithSessionToken(t *testing.T) { func TestSignVerifyRequestData(t *testing.T) {
// sign with empty DataWithTokenSignAccumulator // sign with empty RequestSignedData
require.EqualError(t, require.EqualError(t,
SignDataWithSessionToken(nil, nil), SignRequestData(nil, nil),
ErrNilDataWithTokenSignAccumulator.Error(), ErrNilRequestSignedData.Error(),
) )
// verify with empty DataWithTokenSignSource // verify with empty RequestVerifyData
require.EqualError(t, require.EqualError(t,
VerifyAccumulatedSignaturesWithToken(nil), VerifyRequestData(nil),
ErrNilSignatureKeySourceWithToken.Error(), ErrNilRequestVerifyData.Error(),
) )
// create test session token // create test session token
var ( var (
token = new(Token) token = new(Token)
initVerb = Token_Info_Verb(1) initVerb = Token_Info_Verb(1)
bearer = new(BearerTokenMsg)
bearerEpoch = uint64(8)
extHdrKey = "key"
extHdr = new(RequestExtendedHeader_KV)
) )
token.SetVerb(initVerb) token.SetVerb(initVerb)
bearer.SetExpirationEpoch(bearerEpoch)
extHdr.SetK(extHdrKey)
// create test data with token // create test data with token
src := &testSignedDataSrc{ src := &testSignedDataSrc{
data: testData(t, 10), data: testData(t, 10),
token: token, token: token,
bearer: bearer,
extHdrs: []ExtendedHeader{
wrapExtendedHeaderKV(extHdr),
},
} }
// create test private key // create test private key
sk := test.DecodeKey(0) sk := test.DecodeKey(0)
// sign with private key // sign with private key
require.NoError(t, SignDataWithSessionToken(sk, src)) require.NoError(t, SignRequestData(sk, src))
// ascertain that verification is passed // ascertain that verification is passed
require.NoError(t, VerifyAccumulatedSignaturesWithToken(src)) require.NoError(t, VerifyRequestData(src))
// break the data // break the data
src.data[0]++ src.data[0]++
// ascertain that verification is failed // ascertain that verification is failed
require.Error(t, VerifyAccumulatedSignaturesWithToken(src)) require.Error(t, VerifyRequestData(src))
// restore the data // restore the data
src.data[0]-- src.data[0]--
@ -305,13 +333,37 @@ func TestSignVerifyDataWithSessionToken(t *testing.T) {
token.SetVerb(initVerb + 1) token.SetVerb(initVerb + 1)
// ascertain that verification is failed // ascertain that verification is failed
require.Error(t, VerifyAccumulatedSignaturesWithToken(src)) require.Error(t, VerifyRequestData(src))
// restore the token // restore the token
token.SetVerb(initVerb) token.SetVerb(initVerb)
// ascertain that verification is passed // ascertain that verification is passed
require.NoError(t, VerifyAccumulatedSignaturesWithToken(src)) require.NoError(t, VerifyRequestData(src))
// break the Bearer token
bearer.SetExpirationEpoch(bearerEpoch + 1)
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the Bearer token
bearer.SetExpirationEpoch(bearerEpoch)
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// break the extended header
extHdr.SetK(extHdrKey + "1")
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the extended header
extHdr.SetK(extHdrKey)
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// wrap to data reader // wrap to data reader
rdr := &testSignedDataReader{ rdr := &testSignedDataReader{
@ -319,8 +371,8 @@ func TestSignVerifyDataWithSessionToken(t *testing.T) {
} }
// sign with private key // sign with private key
require.NoError(t, SignDataWithSessionToken(sk, rdr)) require.NoError(t, SignRequestData(sk, rdr))
// ascertain that verification is passed // ascertain that verification is passed
require.NoError(t, VerifyAccumulatedSignaturesWithToken(rdr)) require.NoError(t, VerifyRequestData(rdr))
} }

View file

@ -250,20 +250,67 @@ type DataWithSignKeySource interface {
SignKeyPairSource SignKeyPairSource
} }
// SignedDataWithToken is an interface of data-token pair with read access. // RequestData is an interface of the request information with read access.
type SignedDataWithToken interface { type RequestData interface {
SignedDataSource SignedDataSource
SessionTokenSource SessionTokenSource
BearerTokenSource
ExtendedHeadersSource
} }
// DataWithTokenSignAccumulator is an interface of data-token pair with signature write access. // RequestSignedData is an interface of request information with signature write access.
type DataWithTokenSignAccumulator interface { type RequestSignedData interface {
SignedDataWithToken RequestData
SignKeyPairAccumulator SignKeyPairAccumulator
} }
// DataWithTokenSignSource is an interface of data-token pair with signature read access. // RequestVerifyData is an interface of request information with signature read access.
type DataWithTokenSignSource interface { type RequestVerifyData interface {
SignedDataWithToken RequestData
SignKeyPairSource SignKeyPairSource
} }
// ACLRulesSource is an interface of the container of binary extended ACL rules with read access.
type ACLRulesSource interface {
GetACLRules() []byte
}
// ACLRulesContainer is an interface of the container of binary extended ACL rules.
type ACLRulesContainer interface {
ACLRulesSource
SetACLRules([]byte)
}
// BearerTokenInfo is an interface of a fixed set of Bearer token information value containers.
// Contains:
// - binary extended ACL rules;
// - expiration epoch number;
// - ID of the token's owner.
type BearerTokenInfo interface {
ACLRulesContainer
ExpirationEpochContainer
OwnerIDContainer
}
// BearerToken is an interface of Bearer token information and key-signature pair.
type BearerToken interface {
BearerTokenInfo
OwnerKeyContainer
SignatureContainer
}
// BearerTokenSource is an interface of the container of a BearerToken with read access.
type BearerTokenSource interface {
GetBearerToken() BearerToken
}
// ExtendedHeader is an interface of a string key-value pair with read access.
type ExtendedHeader interface {
Key() string
Value() string
}
// ExtendedHeadersSource is an interface of ExtendedHeader list with read access.
type ExtendedHeadersSource interface {
ExtendedHeaders() []ExtendedHeader
}
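Read together, RequestData is just the union of four read-only views a request exposes before it can be signed or verified. A small compile-time sketch of that contract; the helper is illustrative, not part of the API, and would live alongside the interfaces in this package:

// requestDataContract only shows which methods SignRequestData and
// VerifyRequestData reach through RequestData.
func requestDataContract(req RequestData) {
	_, _ = req.SignedData()   // request body bytes to sign
	_ = req.GetSessionToken() // session token, may be nil
	_ = req.GetBearerToken()  // Bearer token, may be nil
	_ = req.ExtendedHeaders() // X-headers, may be empty
}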

View file

@ -67,6 +67,11 @@ func (m *RequestVerificationHeader) SetToken(token *Token) {
m.Token = token m.Token = token
} }
// SetBearer is a Bearer field setter.
func (m *RequestVerificationHeader) SetBearer(v *BearerTokenMsg) {
m.Bearer = v
}
// testCustomField for test usage only. // testCustomField for test usage only.
type testCustomField [8]uint32 type testCustomField [8]uint32
@ -98,3 +103,14 @@ func (t testCustomField) MarshalTo(data []byte) (int, error) { return 0, nil }
// Marshal skip, it's for test usage only. // Marshal skip, it's for test usage only.
func (t testCustomField) Marshal() ([]byte, error) { return nil, nil } func (t testCustomField) Marshal() ([]byte, error) { return nil, nil }
// GetBearerToken wraps Bearer field and returns BearerToken interface.
//
// If Bearer field value is nil, nil returns.
func (m RequestVerificationHeader) GetBearerToken() BearerToken {
if t := m.GetBearer(); t != nil {
return t
}
return nil
}

View file

@ -79,6 +79,8 @@ type RequestVerificationHeader struct {
Signatures []*RequestVerificationHeader_Signature `protobuf:"bytes,1,rep,name=Signatures,proto3" json:"Signatures,omitempty"` Signatures []*RequestVerificationHeader_Signature `protobuf:"bytes,1,rep,name=Signatures,proto3" json:"Signatures,omitempty"`
// Token is a token of the session within which the request is sent // Token is a token of the session within which the request is sent
Token *Token `protobuf:"bytes,2,opt,name=Token,proto3" json:"Token,omitempty"` Token *Token `protobuf:"bytes,2,opt,name=Token,proto3" json:"Token,omitempty"`
// Bearer is a Bearer token of the request
Bearer *BearerTokenMsg `protobuf:"bytes,3,opt,name=Bearer,proto3" json:"Bearer,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -127,6 +129,13 @@ func (m *RequestVerificationHeader) GetToken() *Token {
return nil return nil
} }
func (m *RequestVerificationHeader) GetBearer() *BearerTokenMsg {
if m != nil {
return m.Bearer
}
return nil
}
type RequestVerificationHeader_Signature struct { type RequestVerificationHeader_Signature struct {
// Sign is signature of the request or session key. // Sign is signature of the request or session key.
Sign []byte `protobuf:"bytes,1,opt,name=Sign,proto3" json:"Sign,omitempty"` Sign []byte `protobuf:"bytes,1,opt,name=Sign,proto3" json:"Sign,omitempty"`
@ -351,6 +360,117 @@ func (m *TokenLifetime) GetValidUntil() uint64 {
return 0 return 0
} }
// BearerTokenMsg carries information about request ACL rules with limited lifetime
type BearerTokenMsg struct {
// TokenInfo is a grouped information about token
BearerTokenMsg_Info `protobuf:"bytes,1,opt,name=TokenInfo,proto3,embedded=TokenInfo" json:"TokenInfo"`
// OwnerKey is a public key of the token owner
OwnerKey []byte `protobuf:"bytes,2,opt,name=OwnerKey,proto3" json:"OwnerKey,omitempty"`
// Signature is a signature of token information
Signature []byte `protobuf:"bytes,3,opt,name=Signature,proto3" json:"Signature,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BearerTokenMsg) Reset() { *m = BearerTokenMsg{} }
func (m *BearerTokenMsg) String() string { return proto.CompactTextString(m) }
func (*BearerTokenMsg) ProtoMessage() {}
func (*BearerTokenMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_4bdd5bc50ec96238, []int{3}
}
func (m *BearerTokenMsg) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BearerTokenMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *BearerTokenMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_BearerTokenMsg.Merge(m, src)
}
func (m *BearerTokenMsg) XXX_Size() int {
return m.Size()
}
func (m *BearerTokenMsg) XXX_DiscardUnknown() {
xxx_messageInfo_BearerTokenMsg.DiscardUnknown(m)
}
var xxx_messageInfo_BearerTokenMsg proto.InternalMessageInfo
func (m *BearerTokenMsg) GetOwnerKey() []byte {
if m != nil {
return m.OwnerKey
}
return nil
}
func (m *BearerTokenMsg) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
type BearerTokenMsg_Info struct {
// ACLRules carries a binary representation of the table of extended ACL rules
ACLRules []byte `protobuf:"bytes,1,opt,name=ACLRules,proto3" json:"ACLRules,omitempty"`
// OwnerID is an owner of token
OwnerID OwnerID `protobuf:"bytes,2,opt,name=OwnerID,proto3,customtype=OwnerID" json:"OwnerID"`
// ValidUntil carries a last epoch of token lifetime
ValidUntil uint64 `protobuf:"varint,3,opt,name=ValidUntil,proto3" json:"ValidUntil,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BearerTokenMsg_Info) Reset() { *m = BearerTokenMsg_Info{} }
func (m *BearerTokenMsg_Info) String() string { return proto.CompactTextString(m) }
func (*BearerTokenMsg_Info) ProtoMessage() {}
func (*BearerTokenMsg_Info) Descriptor() ([]byte, []int) {
return fileDescriptor_4bdd5bc50ec96238, []int{3, 0}
}
func (m *BearerTokenMsg_Info) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BearerTokenMsg_Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *BearerTokenMsg_Info) XXX_Merge(src proto.Message) {
xxx_messageInfo_BearerTokenMsg_Info.Merge(m, src)
}
func (m *BearerTokenMsg_Info) XXX_Size() int {
return m.Size()
}
func (m *BearerTokenMsg_Info) XXX_DiscardUnknown() {
xxx_messageInfo_BearerTokenMsg_Info.DiscardUnknown(m)
}
var xxx_messageInfo_BearerTokenMsg_Info proto.InternalMessageInfo
func (m *BearerTokenMsg_Info) GetACLRules() []byte {
if m != nil {
return m.ACLRules
}
return nil
}
func (m *BearerTokenMsg_Info) GetValidUntil() uint64 {
if m != nil {
return m.ValidUntil
}
return 0
}
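For orientation, a Bearer token message is filled with the Info fields plus the owner's key and signature, and is then attached to a request through RequestVerificationHeader.SetBearer (added below). A rough sketch that compiles inside this package, assuming the SetACLRules and SetExpirationEpoch setters exercised by the tests in this commit; the byte values are placeholders, and signing the token itself is out of scope here:

// exampleAttachBearer is an illustrative helper, not part of the package API.
func exampleAttachBearer() *RequestVerificationHeader {
	bearer := new(BearerTokenMsg)
	bearer.SetACLRules([]byte{1, 2, 3}) // binary eACL table
	bearer.SetExpirationEpoch(8)        // last valid epoch of the token
	bearer.OwnerKey = []byte("placeholder: owner public key bytes")
	bearer.Signature = []byte("placeholder: signature over the Info part")

	hdr := new(RequestVerificationHeader)
	hdr.SetBearer(bearer)

	return hdr // hdr.GetBearerToken() is now non-nil
}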
func init() { func init() {
proto.RegisterEnum("service.Token_Info_Verb", Token_Info_Verb_name, Token_Info_Verb_value) proto.RegisterEnum("service.Token_Info_Verb", Token_Info_Verb_name, Token_Info_Verb_value)
proto.RegisterType((*RequestVerificationHeader)(nil), "service.RequestVerificationHeader") proto.RegisterType((*RequestVerificationHeader)(nil), "service.RequestVerificationHeader")
@ -358,49 +478,56 @@ func init() {
proto.RegisterType((*Token)(nil), "service.Token") proto.RegisterType((*Token)(nil), "service.Token")
proto.RegisterType((*Token_Info)(nil), "service.Token.Info") proto.RegisterType((*Token_Info)(nil), "service.Token.Info")
proto.RegisterType((*TokenLifetime)(nil), "service.TokenLifetime") proto.RegisterType((*TokenLifetime)(nil), "service.TokenLifetime")
proto.RegisterType((*BearerTokenMsg)(nil), "service.BearerTokenMsg")
proto.RegisterType((*BearerTokenMsg_Info)(nil), "service.BearerTokenMsg.Info")
} }
func init() { proto.RegisterFile("service/verify.proto", fileDescriptor_4bdd5bc50ec96238) } func init() { proto.RegisterFile("service/verify.proto", fileDescriptor_4bdd5bc50ec96238) }
var fileDescriptor_4bdd5bc50ec96238 = []byte{ var fileDescriptor_4bdd5bc50ec96238 = []byte{
// 579 bytes of a gzipped FileDescriptorProto // 658 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0x4f, 0x6f, 0x12, 0x41, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x5a,
0x14, 0xef, 0xc0, 0xc2, 0xc2, 0xeb, 0x1f, 0xd7, 0xd1, 0x98, 0x95, 0x18, 0x20, 0xc4, 0x03, 0x4d, 0x10, 0xae, 0x63, 0x27, 0x76, 0xa6, 0x3f, 0xd7, 0xf7, 0xdc, 0xab, 0x7b, 0x4d, 0x54, 0x25, 0x55,
0x64, 0x49, 0x68, 0x62, 0x4c, 0xf4, 0x52, 0x24, 0x5a, 0x62, 0xa3, 0xcd, 0x50, 0x7b, 0xf0, 0xb6, 0xc4, 0xa2, 0x95, 0x88, 0x2d, 0xa5, 0x12, 0x42, 0x82, 0x4d, 0xd3, 0x08, 0x1a, 0x51, 0xa0, 0x3a,
0xc0, 0x63, 0x3b, 0xb1, 0xdd, 0xc1, 0x99, 0x01, 0xd3, 0xef, 0xe1, 0xc1, 0xcf, 0xe0, 0xe7, 0xf0, 0x29, 0x5d, 0xb0, 0x73, 0x92, 0x89, 0x6b, 0x91, 0xda, 0xe1, 0x1c, 0x27, 0xa8, 0xef, 0xc1, 0x82,
0xd0, 0x63, 0x8f, 0xc6, 0x03, 0x31, 0xf8, 0x29, 0xbc, 0x99, 0x99, 0x5d, 0x60, 0x9b, 0xe8, 0xed, 0x67, 0xe0, 0x49, 0xba, 0xec, 0x12, 0xb1, 0x88, 0x50, 0x78, 0x06, 0x16, 0xac, 0x40, 0x1e, 0x3b,
0xf7, 0x7e, 0xef, 0xfd, 0xde, 0xef, 0xcd, 0x9b, 0x19, 0xb8, 0xaf, 0x50, 0xce, 0xf9, 0x08, 0xdb, 0x89, 0x5d, 0x28, 0x12, 0xbb, 0x6f, 0xbe, 0xf9, 0xe6, 0xe7, 0x4c, 0xbe, 0x18, 0xfe, 0x95, 0x28,
0x73, 0x94, 0x7c, 0x72, 0x15, 0x4c, 0xa5, 0xd0, 0x82, 0xba, 0x29, 0x5b, 0xf1, 0x24, 0x4e, 0x54, 0xa6, 0x7e, 0x1f, 0x9d, 0x29, 0x0a, 0x7f, 0x78, 0x69, 0x8f, 0x45, 0x18, 0x85, 0x4c, 0x4f, 0xd9,
0x5b, 0x5f, 0x4d, 0x51, 0x25, 0xa9, 0x4a, 0x2b, 0xe2, 0xfa, 0x7c, 0x36, 0x0c, 0x46, 0xe2, 0xb2, 0x8a, 0x29, 0x70, 0x28, 0x9d, 0xe8, 0x72, 0x8c, 0x32, 0x49, 0x55, 0x1a, 0x9e, 0x1f, 0x9d, 0x4f,
0x1d, 0x89, 0x48, 0xb4, 0x2d, 0x3d, 0x9c, 0x4d, 0x6c, 0x64, 0x03, 0x8b, 0x92, 0xf2, 0xc6, 0x77, 0x7a, 0x76, 0x3f, 0xbc, 0x70, 0xbc, 0xd0, 0x0b, 0x1d, 0xa2, 0x7b, 0x93, 0x21, 0x45, 0x14, 0x10,
0x02, 0x0f, 0x19, 0x7e, 0x9a, 0xa1, 0xd2, 0x67, 0xc6, 0x81, 0x8f, 0x42, 0xcd, 0x45, 0x7c, 0x84, 0x4a, 0xe4, 0xf5, 0xaf, 0x0a, 0xdc, 0xe1, 0xf8, 0x66, 0x82, 0x32, 0x3a, 0x8b, 0x27, 0xf8, 0x7d,
0xe1, 0x18, 0x25, 0x3d, 0x06, 0x18, 0xf0, 0x28, 0x0e, 0xf5, 0x4c, 0xa2, 0xf2, 0x49, 0x3d, 0xdf, 0x37, 0xf2, 0xc3, 0xe0, 0x08, 0xdd, 0x01, 0x0a, 0x76, 0x0c, 0xd0, 0xf5, 0xbd, 0xc0, 0x8d, 0x26,
0xdc, 0xee, 0x3c, 0x09, 0x52, 0xf3, 0xe0, 0xbf, 0xba, 0x60, 0x2d, 0x62, 0x19, 0x3d, 0x7d, 0x0c, 0x02, 0xa5, 0xa5, 0xec, 0xa8, 0xbb, 0xeb, 0xcd, 0x7b, 0x76, 0x3a, 0xdc, 0xbe, 0xb5, 0xce, 0x5e,
0x85, 0x53, 0xf1, 0x11, 0x63, 0x3f, 0x57, 0x27, 0xcd, 0xed, 0xce, 0xde, 0xba, 0x91, 0x65, 0x59, 0x16, 0xf1, 0x4c, 0x3d, 0xbb, 0x0b, 0xc5, 0xd3, 0xf0, 0x35, 0x06, 0x56, 0x61, 0x47, 0xd9, 0x5d,
0x92, 0xac, 0x1c, 0x40, 0x79, 0xad, 0xa1, 0x14, 0x1c, 0x13, 0xf8, 0xa4, 0x4e, 0x9a, 0x3b, 0xcc, 0x6f, 0x6e, 0x2d, 0x1b, 0x11, 0xcb, 0x93, 0x24, 0x73, 0xa0, 0xd4, 0x42, 0x57, 0xa0, 0xb0, 0x54,
0x62, 0xc3, 0x9d, 0x20, 0x4a, 0xdb, 0x65, 0x87, 0x59, 0xdc, 0xf8, 0x93, 0x4f, 0x7b, 0xd3, 0xe7, 0x92, 0xfd, 0xbf, 0x94, 0x25, 0x34, 0xa9, 0x9e, 0x49, 0x8f, 0xa7, 0xb2, 0xca, 0x3e, 0x94, 0x97,
0x50, 0xb6, 0xa0, 0x1f, 0x4f, 0x84, 0x95, 0x6d, 0x77, 0xee, 0xdd, 0x36, 0x0a, 0x4c, 0xaa, 0x5b, 0x43, 0x18, 0x03, 0x2d, 0x0e, 0x2c, 0x65, 0x47, 0xd9, 0xdd, 0xe0, 0x84, 0x63, 0xee, 0x04, 0x51,
0xba, 0x5e, 0xd4, 0xb6, 0x6e, 0x16, 0x35, 0xc2, 0x36, 0xf5, 0xf4, 0x51, 0xc6, 0xdb, 0x2f, 0xd9, 0xd0, 0xd8, 0x0d, 0x4e, 0xb8, 0xfe, 0x4d, 0x4d, 0x97, 0x61, 0x0f, 0xa1, 0x4c, 0xa0, 0x13, 0x0c,
0xfe, 0x1b, 0xa2, 0xf2, 0x25, 0x0f, 0x8e, 0x2d, 0xab, 0x41, 0xae, 0xdf, 0x4b, 0x66, 0xea, 0xde, 0x43, 0x2a, 0x5b, 0x6f, 0xfe, 0x93, 0xdf, 0xcc, 0x8e, 0x53, 0x2d, 0xe3, 0x6a, 0x56, 0x5b, 0xbb,
0x31, 0x7d, 0x7e, 0x2e, 0x6a, 0x6e, 0xd2, 0xa5, 0xc7, 0x72, 0xfd, 0x1e, 0xdd, 0x07, 0xf7, 0xdd, 0x9e, 0xd5, 0x14, 0xbe, 0xd2, 0xb3, 0xed, 0xcc, 0x6c, 0xcb, 0xa0, 0xfe, 0x2b, 0xa2, 0xf2, 0x4e,
0xe7, 0x18, 0x65, 0xbf, 0x97, 0x4c, 0xb9, 0xa9, 0x4a, 0x69, 0xb6, 0x02, 0xf4, 0x29, 0x38, 0x73, 0x05, 0x8d, 0x64, 0x35, 0x28, 0x74, 0xda, 0xc9, 0x4e, 0xad, 0xbf, 0xe2, 0x3e, 0x9f, 0x66, 0x35,
0x94, 0x43, 0x3f, 0x5f, 0x27, 0xcd, 0xbd, 0x8e, 0xff, 0x8f, 0x51, 0x83, 0x33, 0x94, 0xc3, 0x6e, 0x3d, 0xe9, 0xd2, 0xe6, 0x85, 0x4e, 0x9b, 0xed, 0x81, 0xfe, 0xe2, 0x6d, 0x80, 0xa2, 0xd3, 0x4e,
0x69, 0xb9, 0xa8, 0x39, 0x06, 0x31, 0x5b, 0x4f, 0x9f, 0x81, 0x7b, 0x38, 0x1e, 0x4b, 0x54, 0xca, 0xb6, 0x5c, 0xa9, 0x52, 0x9a, 0x2f, 0x00, 0xbb, 0x0f, 0xda, 0x14, 0x45, 0x8f, 0xae, 0xb3, 0xd5,
0x77, 0xec, 0x29, 0x77, 0x03, 0xf3, 0x16, 0x82, 0x94, 0xdc, 0x38, 0xa6, 0x04, 0x5b, 0x01, 0xfa, 0xb4, 0x7e, 0xb1, 0xaa, 0x7d, 0x86, 0xa2, 0xd7, 0x32, 0xe6, 0xb3, 0x9a, 0x16, 0x23, 0x4e, 0x7a,
0x02, 0x4a, 0xc7, 0x7c, 0x82, 0x9a, 0x5f, 0xa2, 0x5f, 0xb0, 0xd2, 0x07, 0xb7, 0x5d, 0x57, 0xd9, 0xf6, 0x00, 0xf4, 0x83, 0xc1, 0x40, 0xa0, 0x94, 0x96, 0x46, 0xaf, 0xdc, 0xb4, 0x63, 0xf3, 0xd8,
0xcc, 0x8e, 0xd6, 0x0a, 0x5a, 0x05, 0x18, 0xa0, 0x52, 0x5c, 0xc4, 0x6f, 0xf0, 0xca, 0x2f, 0xda, 0x29, 0xb9, 0x9a, 0x98, 0x12, 0x7c, 0x01, 0xd8, 0x23, 0x30, 0x8e, 0xfd, 0x21, 0x46, 0xfe, 0x05,
0x1d, 0x65, 0x18, 0x5a, 0x81, 0x92, 0x3d, 0x9a, 0xc9, 0xba, 0x36, 0xbb, 0x8e, 0x1b, 0xa7, 0x60, 0x5a, 0x45, 0x2a, 0xfd, 0x2f, 0x3f, 0x75, 0x91, 0xcd, 0xdc, 0x68, 0x59, 0xc1, 0xaa, 0x00, 0x5d,
0x4f, 0x40, 0x5d, 0xc8, 0x9f, 0xcc, 0xb4, 0xb7, 0x65, 0xc0, 0x6b, 0xd4, 0x1e, 0xa1, 0x25, 0x70, 0x94, 0xd2, 0x0f, 0x83, 0xa7, 0x78, 0x69, 0x95, 0xe8, 0x46, 0x19, 0x86, 0x55, 0xc0, 0xa0, 0xa7,
0xcc, 0xd3, 0xf1, 0x72, 0x14, 0xa0, 0x38, 0xc0, 0x50, 0x8e, 0xce, 0xbd, 0xbc, 0xc1, 0x3d, 0xbc, 0xc5, 0x59, 0x9d, 0xb2, 0xcb, 0xb8, 0x7e, 0x0a, 0xf4, 0x02, 0xa6, 0x83, 0x7a, 0x32, 0x89, 0xcc,
0x40, 0x8d, 0x9e, 0x43, 0xcb, 0x50, 0x60, 0x61, 0x1c, 0xa1, 0x57, 0xa0, 0xbb, 0x50, 0xb6, 0xf0, 0xb5, 0x18, 0x3c, 0xc1, 0xc8, 0x54, 0x98, 0x01, 0x5a, 0xec, 0x35, 0xb3, 0xc0, 0x00, 0x4a, 0x5d,
0x28, 0x54, 0xe7, 0x5e, 0xb1, 0xd1, 0x87, 0xdd, 0x5b, 0x63, 0x53, 0x1f, 0xdc, 0x97, 0x12, 0x43, 0x74, 0x45, 0xff, 0xdc, 0x54, 0x63, 0xdc, 0xc6, 0x11, 0x46, 0x68, 0x6a, 0xac, 0x0c, 0x45, 0xee,
0x8d, 0x63, 0x7b, 0x47, 0x0e, 0x5b, 0x85, 0x66, 0xf8, 0xb3, 0xf0, 0x82, 0x8f, 0xdf, 0xc7, 0x9a, 0x06, 0x1e, 0x9a, 0x45, 0xb6, 0x09, 0x65, 0x82, 0x47, 0xae, 0x3c, 0x37, 0x4b, 0xf5, 0x0e, 0x6c,
0x5f, 0xd8, 0xab, 0x71, 0x58, 0x86, 0xe9, 0x0e, 0xae, 0x97, 0x55, 0x72, 0xb3, 0xac, 0x92, 0x1f, 0xe6, 0xd6, 0x66, 0x16, 0xe8, 0x87, 0x02, 0xdd, 0x08, 0x07, 0xf4, 0x1b, 0x69, 0x7c, 0x11, 0xc6,
0xcb, 0x2a, 0xf9, 0xb5, 0xac, 0x92, 0xaf, 0xbf, 0xab, 0x5b, 0x1f, 0xf6, 0x33, 0x5f, 0x2a, 0x56, 0xcb, 0x9f, 0xb9, 0x23, 0x7f, 0xf0, 0x32, 0x88, 0xfc, 0x11, 0xfd, 0x34, 0x1a, 0xcf, 0x30, 0xf5,
0xd3, 0xd1, 0xa8, 0x35, 0xc6, 0x79, 0x3b, 0x46, 0x31, 0x51, 0xad, 0x70, 0xca, 0x5b, 0x91, 0x68, 0xef, 0x0a, 0x6c, 0xe5, 0x6d, 0xc9, 0xda, 0x3f, 0xfb, 0x69, 0xfb, 0x16, 0x0b, 0xff, 0xc6, 0x58,
0xa7, 0xfb, 0xfb, 0x96, 0xbb, 0xfb, 0x16, 0xc5, 0xab, 0x41, 0x70, 0x78, 0xd2, 0x0f, 0x06, 0x09, 0xd9, 0xab, 0x14, 0xf2, 0x57, 0xc9, 0x9b, 0x4e, 0xbd, 0x69, 0xba, 0x8b, 0xd4, 0x73, 0x15, 0x30,
0x37, 0x2c, 0xda, 0x9f, 0x76, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0xcb, 0xab, 0x17, 0xd9, 0xcb, 0x0e, 0x0e, 0x8f, 0xf9, 0x64, 0x44, 0xff, 0x5c, 0xea, 0xb0, 0x88, 0xff, 0xc4, 0x6e, 0xf9, 0x0b,
0x03, 0x00, 0x00, 0xa8, 0x37, 0x2f, 0xd0, 0xea, 0x5e, 0xcd, 0xab, 0xca, 0xf5, 0xbc, 0xaa, 0x7c, 0x9c, 0x57, 0x95,
0xcf, 0xf3, 0xaa, 0xf2, 0xfe, 0x4b, 0x75, 0xed, 0xd5, 0x5e, 0xe6, 0x2b, 0x14, 0xc8, 0x71, 0xbf,
0xdf, 0x18, 0xe0, 0xd4, 0x09, 0x30, 0x1c, 0xca, 0x86, 0x3b, 0xf6, 0x1b, 0x5e, 0xe8, 0xa4, 0x27,
0xf9, 0x50, 0xf8, 0xfb, 0x39, 0x86, 0x8f, 0xbb, 0xf6, 0xc1, 0x49, 0xc7, 0xee, 0x26, 0x5c, 0xaf,
0x44, 0x1f, 0xa7, 0xfd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x49, 0xb9, 0x3e, 0xfe, 0x04,
0x00, 0x00,
} }
func (m *RequestVerificationHeader) Marshal() (dAtA []byte, err error) { func (m *RequestVerificationHeader) Marshal() (dAtA []byte, err error) {
@ -427,6 +554,18 @@ func (m *RequestVerificationHeader) MarshalToSizedBuffer(dAtA []byte) (int, erro
i -= len(m.XXX_unrecognized) i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized)
} }
if m.Bearer != nil {
{
size, err := m.Bearer.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintVerify(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if m.Token != nil { if m.Token != nil {
{ {
size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) size, err := m.Token.MarshalToSizedBuffer(dAtA[:i])
@ -664,6 +803,106 @@ func (m *TokenLifetime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *BearerTokenMsg) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BearerTokenMsg) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BearerTokenMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Signature) > 0 {
i -= len(m.Signature)
copy(dAtA[i:], m.Signature)
i = encodeVarintVerify(dAtA, i, uint64(len(m.Signature)))
i--
dAtA[i] = 0x1a
}
if len(m.OwnerKey) > 0 {
i -= len(m.OwnerKey)
copy(dAtA[i:], m.OwnerKey)
i = encodeVarintVerify(dAtA, i, uint64(len(m.OwnerKey)))
i--
dAtA[i] = 0x12
}
{
size, err := m.BearerTokenMsg_Info.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintVerify(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *BearerTokenMsg_Info) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BearerTokenMsg_Info) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BearerTokenMsg_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.ValidUntil != 0 {
i = encodeVarintVerify(dAtA, i, uint64(m.ValidUntil))
i--
dAtA[i] = 0x18
}
{
size := m.OwnerID.Size()
i -= size
if _, err := m.OwnerID.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintVerify(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.ACLRules) > 0 {
i -= len(m.ACLRules)
copy(dAtA[i:], m.ACLRules)
i = encodeVarintVerify(dAtA, i, uint64(len(m.ACLRules)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
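The hard-coded tag bytes in the marshalers above follow the usual protobuf rule tag = fieldNumber<<3 | wireType; as a quick check for the Bearer token fields:

var (
	_ = byte(1<<3 | 2) // 0x0a: Info (field 1, length-delimited)
	_ = byte(2<<3 | 2) // 0x12: OwnerKey, OwnerID (field 2, length-delimited)
	_ = byte(3<<3 | 2) // 0x1a: Signature, Bearer (field 3, length-delimited)
	_ = byte(3<<3 | 0) // 0x18: ValidUntil (field 3, varint)
)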
func encodeVarintVerify(dAtA []byte, offset int, v uint64) int { func encodeVarintVerify(dAtA []byte, offset int, v uint64) int {
offset -= sovVerify(v) offset -= sovVerify(v)
base := offset base := offset
@ -691,6 +930,10 @@ func (m *RequestVerificationHeader) Size() (n int) {
l = m.Token.Size() l = m.Token.Size()
n += 1 + l + sovVerify(uint64(l)) n += 1 + l + sovVerify(uint64(l))
} }
if m.Bearer != nil {
l = m.Bearer.Size()
n += 1 + l + sovVerify(uint64(l))
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -784,6 +1027,49 @@ func (m *TokenLifetime) Size() (n int) {
return n return n
} }
func (m *BearerTokenMsg) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.BearerTokenMsg_Info.Size()
n += 1 + l + sovVerify(uint64(l))
l = len(m.OwnerKey)
if l > 0 {
n += 1 + l + sovVerify(uint64(l))
}
l = len(m.Signature)
if l > 0 {
n += 1 + l + sovVerify(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *BearerTokenMsg_Info) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ACLRules)
if l > 0 {
n += 1 + l + sovVerify(uint64(l))
}
l = m.OwnerID.Size()
n += 1 + l + sovVerify(uint64(l))
if m.ValidUntil != 0 {
n += 1 + sovVerify(uint64(m.ValidUntil))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovVerify(x uint64) (n int) { func sovVerify(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7 return (math_bits.Len64(x|1) + 6) / 7
} }
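sovVerify returns the encoded size of a varint: (bits(x|1)+6)/7 rounds the bit length up to whole 7-bit groups. A tiny check (illustrative program, not part of the package):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint64{0, 127, 128, 300} {
		fmt.Println(x, (bits.Len64(x|1)+6)/7) // sizes: 1, 1, 2, 2 bytes
	}
}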
@ -889,6 +1175,42 @@ func (m *RequestVerificationHeader) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Bearer", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Bearer == nil {
m.Bearer = &BearerTokenMsg{}
}
if err := m.Bearer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipVerify(dAtA[iNdEx:]) skippy, err := skipVerify(dAtA[iNdEx:])
@ -1522,6 +1844,301 @@ func (m *TokenLifetime) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *BearerTokenMsg) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BearerTokenMsg: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BearerTokenMsg: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BearerTokenMsg_Info", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.BearerTokenMsg_Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OwnerKey", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OwnerKey = append(m.OwnerKey[:0], dAtA[iNdEx:postIndex]...)
if m.OwnerKey == nil {
m.OwnerKey = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
if m.Signature == nil {
m.Signature = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipVerify(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthVerify
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthVerify
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *BearerTokenMsg_Info) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Info: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ACLRules", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ACLRules = append(m.ACLRules[:0], dAtA[iNdEx:postIndex]...)
if m.ACLRules == nil {
m.ACLRules = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OwnerID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthVerify
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthVerify
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.OwnerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ValidUntil", wireType)
}
m.ValidUntil = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVerify
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ValidUntil |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipVerify(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthVerify
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthVerify
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipVerify(dAtA []byte) (n int, err error) { func skipVerify(dAtA []byte) (n int, err error) {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

View file

@ -23,6 +23,9 @@ message RequestVerificationHeader {
// Token is a token of the session within which the request is sent // Token is a token of the session within which the request is sent
Token Token = 2; Token Token = 2;
// Bearer is a Bearer token of the request
BearerTokenMsg Bearer = 3;
} }
// User token granting rights for object manipulation // User token granting rights for object manipulation
@ -91,3 +94,26 @@ message TokenLifetime {
// uint32 Version = 2; // uint32 Version = 2;
// bytes Data = 3; // bytes Data = 3;
// } // }
// BearerTokenMsg carries information about request ACL rules with limited lifetime
message BearerTokenMsg {
message Info {
// ACLRules carries a binary representation of the table of extended ACL rules
bytes ACLRules = 1;
// OwnerID is an owner of token
bytes OwnerID = 2 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// ValidUntil carries a last epoch of token lifetime
uint64 ValidUntil = 3;
}
// TokenInfo is a grouped information about token
Info TokenInfo = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// OwnerKey is a public key of the token owner
bytes OwnerKey = 2;
// Signature is a signature of token information
bytes Signature = 3;
}

View file

@ -69,7 +69,7 @@ func BenchmarkSignDataWithSessionToken(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
require.NoError(b, SignDataWithSessionToken(key, req)) require.NoError(b, SignRequestData(key, req))
} }
} }
@ -91,14 +91,14 @@ func BenchmarkVerifyAccumulatedSignaturesWithToken(b *testing.B) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
key := test.DecodeKey(i) key := test.DecodeKey(i)
require.NoError(b, SignDataWithSessionToken(key, req)) require.NoError(b, SignRequestData(key, req))
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
require.NoError(b, VerifyAccumulatedSignaturesWithToken(req)) require.NoError(b, VerifyRequestData(req))
} }
} }
@ -115,3 +115,26 @@ func TestRequestVerificationHeader_SetToken(t *testing.T) {
require.Equal(t, token, h.GetToken()) require.Equal(t, token, h.GetToken())
} }
func TestRequestVerificationHeader_SetBearer(t *testing.T) {
aclRules := []byte{1, 2, 3}
token := new(BearerTokenMsg)
token.SetACLRules(aclRules)
h := new(RequestVerificationHeader)
h.SetBearer(token)
require.Equal(t, token, h.GetBearer())
}
func TestRequestVerificationHeader_GetBearerToken(t *testing.T) {
s := new(RequestVerificationHeader)
require.Nil(t, s.GetBearerToken())
bearer := new(BearerTokenMsg)
s.SetBearer(bearer)
require.Equal(t, bearer, s.GetBearerToken())
}

View file

@ -53,7 +53,7 @@ func (s gRPCCreator) Create(ctx context.Context, p CreateParamsSource) (CreateRe
req.SetExpirationEpoch(p.ExpirationEpoch()) req.SetExpirationEpoch(p.ExpirationEpoch())
// sign with private key // sign with private key
if err := service.SignDataWithSessionToken(s.key, req); err != nil { if err := service.SignRequestData(s.key, req); err != nil {
return nil, err return nil, err
} }

View file

@ -84,7 +84,7 @@ func TestGRPCCreator_Create(t *testing.T) {
require.Equal(t, ownerID, req.GetOwnerID()) require.Equal(t, ownerID, req.GetOwnerID())
require.Equal(t, created, req.CreationEpoch()) require.Equal(t, created, req.CreationEpoch())
require.Equal(t, expired, req.ExpirationEpoch()) require.Equal(t, expired, req.ExpirationEpoch())
require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(req)) require.NoError(t, service.VerifyRequestData(req))
}, },
resp: &CreateResponse{ resp: &CreateResponse{
ID: TokenID{1, 2, 3}, ID: TokenID{1, 2, 3},

View file

@ -12,7 +12,7 @@ func TestRequestSign(t *testing.T) {
sk := test.DecodeKey(0) sk := test.DecodeKey(0)
type sigType interface { type sigType interface {
service.SignedDataWithToken service.RequestData
service.SignKeyPairAccumulator service.SignKeyPairAccumulator
service.SignKeyPairSource service.SignKeyPairSource
SetToken(*service.Token) SetToken(*service.Token)
@ -68,26 +68,26 @@ func TestRequestSign(t *testing.T) {
token := new(service.Token) token := new(service.Token)
v.SetToken(token) v.SetToken(token)
require.NoError(t, service.SignDataWithSessionToken(sk, v)) require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v)) require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1)) token.SetSessionKey(append(token.GetSessionKey(), 1))
require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v)) require.Error(t, service.VerifyRequestData(v))
} }
{ // payload corruptions { // payload corruptions
for _, corruption := range item.payloadCorrupt { for _, corruption := range item.payloadCorrupt {
v := item.constructor() v := item.constructor()
require.NoError(t, service.SignDataWithSessionToken(sk, v)) require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyAccumulatedSignaturesWithToken(v)) require.NoError(t, service.VerifyRequestData(v))
corruption(v) corruption(v)
require.Error(t, service.VerifyAccumulatedSignaturesWithToken(v)) require.Error(t, service.VerifyRequestData(v))
} }
} }
} }