Remove v1 code

Alex Vanin 2020-08-12 09:37:59 +03:00 committed by Stanislav Bogatyrev
parent ed7879a89e
commit 0a5d0ff1a2
140 changed files with 0 additions and 45161 deletions

@@ -1,8 +0,0 @@
#!/bin/bash
CHEQUE=7849b02d01cc7f7734295fa815ea64ec4d2012e45b8781eb891723ba2703c53263e8d6e522dc32203339dcd8eee9c6b7439a00ea56fa00000000611e000000000000060003012d47e76210aec73be39ab3d186e0a40fe8d86bfa3d4fabfda57ba13b88f96a8ebe4360627a1326f13fb9516c0dbc4af90f116e44bd33f4d04a0d1633afa243ad4f2fa9cd933e7631a619b5132cec6983906aba757af5590434124b232a43e302f7528ce97c9a93558efe7d4f62577aabdf771c931f54a71be6ad21e7d9cc177744b4b9781cf0c29adb503f33d2df9f810ebf33a774849848984cf7e2bbebd48ef0cd8592fbf9b6aee1dc74803e31c95a02dbbd5fd9783f9ecbcbf444b5942f830368a6f5829fb2a34fa03d0308ae6b05f433f2904d9a852fed1f5d2eb598ca794770adb1ece9dccd1c7ad98f709cfb890e3bdd5973dcdd838111fae2efa4c3c09ea2133e5d7c6eac6ae24afcce46db7c9f4dc154f123c835adff4e0b7e19bcffda0340750b92789821683283bcb98e32b7e032b94f267b6964613fc31a7ce5813fddad8298f71dfdc4f9b3e353f969727af476d43b12a25727cf6b9c73ae7152266d995bec807068ad2156288c4d946aeb17ebca787d498a1b87b9dae1bcd935763403fef27f744e829131d0ec980829fafa51db1714c2761d9f78762c008c323e9d66db9b5086d355897e52fe065e14f1cc70334248349aa4c7a3e6e3dc8f8693b1511c73dc88e6d6e8b6c6c68de922f351b5b1543917af2f2a3588aebfbd1ff3fac6023e00f03a16e8707ce045eb42ee80d392451541ee510dc18e1c8befbac54d742648b58f379b5337d9b74c5a61afb8ef3db7f3eb0454d6823777b613a3ee22cd6ce47e4fa72170d49267b773cc09c123654e0bcd7278aa2ae1e7c85d049b557a3c
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
echo $CHEQUE | xxd -p -r > $DIR/cheque_data
exit 0

Binary file not shown.

@@ -1,46 +0,0 @@
package accounting
import (
"github.com/nspcc-dev/neofs-api-go/decimal"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
)
type (
// OwnerID type alias.
OwnerID = refs.OwnerID
// Decimal type alias.
Decimal = decimal.Decimal
// Filter is used to filter accounts by criteria.
Filter func(acc *Account) bool
)
const (
// ErrEmptyAddress is raised when passed Address is empty.
ErrEmptyAddress = internal.Error("empty address")
// ErrEmptyLockTarget is raised when passed LockTarget is empty.
ErrEmptyLockTarget = internal.Error("empty lock target")
// ErrEmptyContainerID is raised when passed CID is empty.
ErrEmptyContainerID = internal.Error("empty container ID")
// ErrEmptyParentAddress is raised when passed ParentAddress is empty.
ErrEmptyParentAddress = internal.Error("empty parent address")
)
// SumFunds goes through all accounts and sums up active funds.
func SumFunds(accounts []*Account) (res *decimal.Decimal) {
res = decimal.Zero.Copy()
for i := range accounts {
if accounts[i] == nil {
continue
}
res = res.Add(accounts[i].ActiveFunds)
}
return
}
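
For reference, a minimal usage sketch of the removed SumFunds helper (not part of the original diff; it assumes the v1 import paths shown above and the Account type whose ActiveFunds field appears later in this diff):

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/accounting"
    "github.com/nspcc-dev/neofs-api-go/decimal"
)

func main() {
    accounts := []*accounting.Account{
        {ActiveFunds: decimal.New(7)},
        nil, // nil entries are skipped by SumFunds
        {ActiveFunds: decimal.New(3)},
    }
    // Sums the active funds of all non-nil accounts: 7 + 3 = 10.
    fmt.Println(accounting.SumFunds(accounts))
}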

@@ -1,776 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: accounting/service.proto
package accounting
import (
context "context"
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
decimal "github.com/nspcc-dev/neofs-api-go/decimal"
service "github.com/nspcc-dev/neofs-api-go/service"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type BalanceRequest struct {
// OwnerID is a wallet address
OwnerID OwnerID `protobuf:"bytes,1,opt,name=OwnerID,proto3,customtype=OwnerID" json:"OwnerID"`
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader `protobuf:"bytes,98,opt,name=Meta,proto3,embedded=Meta" json:"Meta"`
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader `protobuf:"bytes,99,opt,name=Verify,proto3,embedded=Verify" json:"Verify"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BalanceRequest) Reset() { *m = BalanceRequest{} }
func (m *BalanceRequest) String() string { return proto.CompactTextString(m) }
func (*BalanceRequest) ProtoMessage() {}
func (*BalanceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_7f9514b8f1d4c7fe, []int{0}
}
func (m *BalanceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *BalanceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_BalanceRequest.Merge(m, src)
}
func (m *BalanceRequest) XXX_Size() int {
return m.Size()
}
func (m *BalanceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_BalanceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BalanceRequest proto.InternalMessageInfo
type BalanceResponse struct {
// Balance contains current account balance state
Balance *decimal.Decimal `protobuf:"bytes,1,opt,name=Balance,proto3" json:"Balance,omitempty"`
// LockAccounts contains information about locked funds. Locked funds appear
// when a user pays for storage or withdraws assets.
LockAccounts []*Account `protobuf:"bytes,2,rep,name=LockAccounts,proto3" json:"LockAccounts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BalanceResponse) Reset() { *m = BalanceResponse{} }
func (m *BalanceResponse) String() string { return proto.CompactTextString(m) }
func (*BalanceResponse) ProtoMessage() {}
func (*BalanceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_7f9514b8f1d4c7fe, []int{1}
}
func (m *BalanceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *BalanceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_BalanceResponse.Merge(m, src)
}
func (m *BalanceResponse) XXX_Size() int {
return m.Size()
}
func (m *BalanceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_BalanceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_BalanceResponse proto.InternalMessageInfo
func (m *BalanceResponse) GetBalance() *decimal.Decimal {
if m != nil {
return m.Balance
}
return nil
}
func (m *BalanceResponse) GetLockAccounts() []*Account {
if m != nil {
return m.LockAccounts
}
return nil
}
func init() {
proto.RegisterType((*BalanceRequest)(nil), "accounting.BalanceRequest")
proto.RegisterType((*BalanceResponse)(nil), "accounting.BalanceResponse")
}
func init() { proto.RegisterFile("accounting/service.proto", fileDescriptor_7f9514b8f1d4c7fe) }
var fileDescriptor_7f9514b8f1d4c7fe = []byte{
// 399 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x8e, 0xd3, 0x30,
0x18, 0x5c, 0x2f, 0x68, 0x17, 0xb9, 0x2b, 0x16, 0x99, 0x05, 0x45, 0x41, 0x4a, 0xab, 0x9e, 0x0a,
0x22, 0x8e, 0x14, 0x0e, 0x70, 0x6d, 0x54, 0x21, 0x56, 0xe2, 0x67, 0x15, 0x24, 0x24, 0xb8, 0x39,
0xee, 0xd7, 0x60, 0xb1, 0xb5, 0x43, 0xec, 0x04, 0xed, 0x9b, 0xf0, 0x0c, 0xbc, 0x02, 0x2f, 0xb0,
0xc7, 0x1e, 0x11, 0x87, 0x0a, 0x85, 0x17, 0x41, 0x75, 0x9c, 0xa6, 0x05, 0x4e, 0xb6, 0x67, 0xbe,
0x19, 0x8f, 0xc7, 0xd8, 0x63, 0x9c, 0xab, 0x4a, 0x1a, 0x21, 0xf3, 0x48, 0x43, 0x59, 0x0b, 0x0e,
0xb4, 0x28, 0x95, 0x51, 0x04, 0xf7, 0x8c, 0x4f, 0x1c, 0x15, 0x2d, 0xc1, 0xb0, 0x96, 0xf7, 0xcf,
0x3a, 0xac, 0x86, 0x52, 0x2c, 0xae, 0x1c, 0x7a, 0x6f, 0x0e, 0x5c, 0x2c, 0xd9, 0x65, 0xe4, 0x56,
0x07, 0xdf, 0xdf, 0xb9, 0xc6, 0x5c, 0x15, 0xa0, 0x1d, 0x1e, 0xe6, 0xc2, 0x7c, 0xac, 0x32, 0xca,
0xd5, 0x32, 0xca, 0x55, 0xae, 0x22, 0x0b, 0x67, 0xd5, 0xc2, 0x9e, 0xec, 0xc1, 0xee, 0xda, 0xf1,
0xf1, 0x77, 0x84, 0x6f, 0x27, 0xec, 0x92, 0x49, 0x0e, 0x29, 0x7c, 0xae, 0x40, 0x1b, 0xf2, 0x10,
0x1f, 0xbf, 0xf9, 0x22, 0xa1, 0x3c, 0x9f, 0x79, 0x68, 0x84, 0x26, 0x27, 0xc9, 0xe9, 0xf5, 0x7a,
0x78, 0xf0, 0x73, 0x3d, 0xec, 0xe0, 0xb4, 0xdb, 0x90, 0x67, 0xf8, 0xe6, 0x2b, 0x30, 0xcc, 0xcb,
0x46, 0x68, 0x32, 0x88, 0x7d, 0xda, 0xbd, 0xd7, 0x59, 0x6d, 0xb8, 0x17, 0xc0, 0xe6, 0x50, 0x26,
0xb7, 0x36, 0x1e, 0xab, 0xf5, 0x10, 0xa5, 0x56, 0x41, 0x66, 0xf8, 0xe8, 0x9d, 0x7d, 0xa5, 0xc7,
0xad, 0x76, 0xfc, 0xb7, 0xd6, 0xb2, 0x82, 0x33, 0x23, 0x94, 0xfc, 0xc7, 0xc3, 0x69, 0xc7, 0x35,
0x3e, 0xdd, 0x86, 0xd7, 0x85, 0x92, 0x1a, 0xc8, 0x23, 0x7c, 0xec, 0x20, 0x9b, 0x7e, 0x10, 0xdf,
0xa1, 0x5d, 0x71, 0xb3, 0x76, 0x4d, 0xbb, 0x01, 0xf2, 0x14, 0x9f, 0xbc, 0x54, 0xfc, 0xd3, 0xb4,
0x6d, 0x52, 0x7b, 0x87, 0xa3, 0x1b, 0x93, 0x41, 0x7c, 0x97, 0xf6, 0xd5, 0x52, 0xc7, 0xa5, 0x7b,
0x83, 0xf1, 0x05, 0xc6, 0xd3, 0xed, 0x0c, 0x49, 0xb6, 0x57, 0x12, 0x7f, 0x57, 0xbb, 0xdf, 0xab,
0xff, 0xe0, 0xbf, 0x5c, 0x1b, 0x3b, 0x79, 0x7f, 0xdd, 0x04, 0x68, 0xd5, 0x04, 0xe8, 0x47, 0x13,
0xa0, 0x5f, 0x4d, 0x80, 0xbe, 0xfe, 0x0e, 0x0e, 0x3e, 0x3c, 0xde, 0xf9, 0x4c, 0xa9, 0x0b, 0xce,
0xc3, 0x39, 0xd4, 0x91, 0x04, 0xb5, 0xd0, 0x21, 0x2b, 0x44, 0x98, 0xab, 0xa8, 0xf7, 0xfc, 0x76,
0x78, 0xf6, 0x1a, 0xd4, 0xf3, 0xb7, 0x74, 0x7a, 0x71, 0x4e, 0xfb, 0x78, 0xd9, 0x91, 0xfd, 0xe9,
0x27, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x81, 0x29, 0x02, 0x99, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// AccountingClient is the client API for Accounting service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AccountingClient interface {
// Balance returns current balance status of the NeoFS user
Balance(ctx context.Context, in *BalanceRequest, opts ...grpc.CallOption) (*BalanceResponse, error)
}
type accountingClient struct {
cc *grpc.ClientConn
}
func NewAccountingClient(cc *grpc.ClientConn) AccountingClient {
return &accountingClient{cc}
}
func (c *accountingClient) Balance(ctx context.Context, in *BalanceRequest, opts ...grpc.CallOption) (*BalanceResponse, error) {
out := new(BalanceResponse)
err := c.cc.Invoke(ctx, "/accounting.Accounting/Balance", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AccountingServer is the server API for Accounting service.
type AccountingServer interface {
// Balance returns current balance status of the NeoFS user
Balance(context.Context, *BalanceRequest) (*BalanceResponse, error)
}
// UnimplementedAccountingServer can be embedded to have forward compatible implementations.
type UnimplementedAccountingServer struct {
}
func (*UnimplementedAccountingServer) Balance(ctx context.Context, req *BalanceRequest) (*BalanceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Balance not implemented")
}
func RegisterAccountingServer(s *grpc.Server, srv AccountingServer) {
s.RegisterService(&_Accounting_serviceDesc, srv)
}
func _Accounting_Balance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BalanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AccountingServer).Balance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/accounting.Accounting/Balance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AccountingServer).Balance(ctx, req.(*BalanceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Accounting_serviceDesc = grpc.ServiceDesc{
ServiceName: "accounting.Accounting",
HandlerType: (*AccountingServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Balance",
Handler: _Accounting_Balance_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "accounting/service.proto",
}
func (m *BalanceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BalanceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BalanceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
{
size, err := m.RequestVerificationHeader.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6
i--
dAtA[i] = 0x9a
{
size, err := m.RequestMetaHeader.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6
i--
dAtA[i] = 0x92
{
size := m.OwnerID.Size()
i -= size
if _, err := m.OwnerID.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *BalanceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BalanceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BalanceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.LockAccounts) > 0 {
for iNdEx := len(m.LockAccounts) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.LockAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if m.Balance != nil {
{
size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintService(dAtA []byte, offset int, v uint64) int {
offset -= sovService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *BalanceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.OwnerID.Size()
n += 1 + l + sovService(uint64(l))
l = m.RequestMetaHeader.Size()
n += 2 + l + sovService(uint64(l))
l = m.RequestVerificationHeader.Size()
n += 2 + l + sovService(uint64(l))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *BalanceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Balance != nil {
l = m.Balance.Size()
n += 1 + l + sovService(uint64(l))
}
if len(m.LockAccounts) > 0 {
for _, e := range m.LockAccounts {
l = e.Size()
n += 1 + l + sovService(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozService(x uint64) (n int) {
return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *BalanceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BalanceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BalanceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OwnerID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.OwnerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 98:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestMetaHeader", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.RequestMetaHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 99:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestVerificationHeader", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.RequestVerificationHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipService(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *BalanceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BalanceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BalanceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Balance == nil {
m.Balance = &decimal.Decimal{}
}
if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LockAccounts", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LockAccounts = append(m.LockAccounts, &Account{})
if err := m.LockAccounts[len(m.LockAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipService(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group")
)
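
A minimal client sketch against the generated Accounting API above (not part of the original diff; the endpoint address and freshly generated key are placeholders, and a real request would additionally be signed with service.SignRequestData before a node accepts it):

package main

import (
    "context"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "log"

    "github.com/nspcc-dev/neofs-api-go/accounting"
    "github.com/nspcc-dev/neofs-api-go/refs"
    "google.golang.org/grpc"
)

func main() {
    // Placeholder endpoint of a NeoFS node.
    conn, err := grpc.Dial("127.0.0.1:8080", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Placeholder key; in practice this is the user's own NeoFS key.
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    owner, err := refs.NewOwnerID(&key.PublicKey)
    if err != nil {
        log.Fatal(err)
    }

    req := new(accounting.BalanceRequest)
    req.SetOwnerID(owner)

    resp, err := accounting.NewAccountingClient(conn).Balance(context.Background(), req)
    if err != nil {
        log.Fatal(err)
    }
    log.Println("balance:", resp.GetBalance())
}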

@@ -1,36 +0,0 @@
syntax = "proto3";
package accounting;
option go_package = "github.com/nspcc-dev/neofs-api-go/accounting";
option csharp_namespace = "NeoFS.API.Accounting";
import "service/meta.proto";
import "service/verify.proto";
import "decimal/decimal.proto";
import "accounting/types.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Accounting is a service that provides access to accounting balance
// information
service Accounting {
// Balance returns current balance status of the NeoFS user
rpc Balance(BalanceRequest) returns (BalanceResponse);
}
message BalanceRequest {
// OwnerID is a wallet address
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message BalanceResponse {
// Balance contains current account balance state
decimal.Decimal Balance = 1;
// LockAccounts contains information about locked funds. Locked funds appear
// when a user pays for storage or withdraws assets.
repeated Account LockAccounts = 2;
}

@@ -1,167 +0,0 @@
package accounting
import (
"encoding/binary"
"io"
"github.com/nspcc-dev/neofs-api-go/service"
)
// SignedData returns payload bytes of the request.
func (m BalanceRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m BalanceRequest) SignedDataSize() int {
return m.GetOwnerID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF is returned.
func (m BalanceRequest) ReadSignedData(p []byte) (int, error) {
sz := m.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
copy(p, m.GetOwnerID().Bytes())
return sz, nil
}
// SignedData returns payload bytes of the request.
func (m GetRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m GetRequest) SignedDataSize() int {
return m.GetID().Size() + m.GetOwnerID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF is returned.
func (m GetRequest) ReadSignedData(p []byte) (int, error) {
sz := m.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
copy(p[off:], m.GetOwnerID().Bytes())
return sz, nil
}
// SignedData returns payload bytes of the request.
func (m PutRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m PutRequest) SignedDataSize() (sz int) {
sz += m.GetOwnerID().Size()
sz += m.GetMessageID().Size()
sz += 8
if amount := m.GetAmount(); amount != nil {
sz += amount.Size()
}
return
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF is returned.
func (m PutRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetOwnerID().Bytes())
off += copy(p[off:], m.GetMessageID().Bytes())
binary.BigEndian.PutUint64(p[off:], m.GetHeight())
off += 8
if amount := m.GetAmount(); amount != nil {
n, err := amount.MarshalTo(p[off:])
off += n
if err != nil {
return off, err
}
}
return off, nil
}
// SignedData returns payload bytes of the request.
func (m ListRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m ListRequest) SignedDataSize() int {
return m.GetOwnerID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF is returned.
func (m ListRequest) ReadSignedData(p []byte) (int, error) {
sz := m.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
copy(p, m.GetOwnerID().Bytes())
return sz, nil
}
// SignedData returns payload bytes of the request.
func (m DeleteRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m DeleteRequest) SignedDataSize() (sz int) {
sz += m.GetID().Size()
sz += m.GetOwnerID().Size()
sz += m.GetMessageID().Size()
return
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF is returned.
func (m DeleteRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
off += copy(p[off:], m.GetOwnerID().Bytes())
off += copy(p[off:], m.GetMessageID().Bytes())
return off, nil
}
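
A brief sketch of how these signed-payload helpers compose (not part of the original diff; the key is a placeholder, and the same path is exercised through service.SignRequestData in the signature test that follows):

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"
    "log"

    "github.com/nspcc-dev/neofs-api-go/accounting"
    "github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    owner, err := refs.NewOwnerID(&key.PublicKey)
    if err != nil {
        log.Fatal(err)
    }

    req := new(accounting.BalanceRequest)
    req.SetOwnerID(owner)

    // SignedDataSize reports the payload length (for BalanceRequest: the OwnerID size);
    // ReadSignedData copies that payload into a caller-provided buffer and fails with
    // io.ErrUnexpectedEOF if the buffer is too small.
    buf := make([]byte, req.SignedDataSize())
    if _, err := req.ReadSignedData(buf); err != nil {
        log.Fatal(err)
    }

    // SignedData is the one-shot equivalent built on SignedDataFromReader.
    payload, err := req.SignedData()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d signed bytes\n", len(payload))
}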

@@ -1,185 +0,0 @@
package accounting
import (
"testing"
"github.com/nspcc-dev/neofs-api-go/decimal"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestSignBalanceRequest(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
}
items := []struct {
constructor func() sigType
payloadCorrupt []func(sigType)
}{
{ // BalanceRequest
constructor: func() sigType {
return new(BalanceRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*BalanceRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
},
},
{ // GetRequest
constructor: func() sigType {
return new(GetRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*GetRequest)
id, err := NewChequeID()
require.NoError(t, err)
req.SetID(id)
},
func(s sigType) {
req := s.(*GetRequest)
id := req.GetOwnerID()
id[0]++
req.SetOwnerID(id)
},
},
},
{ // PutRequest
constructor: func() sigType {
req := new(PutRequest)
amount := decimal.New(1)
req.SetAmount(amount)
return req
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*PutRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
func(s sigType) {
req := s.(*PutRequest)
mid := req.GetMessageID()
mid[0]++
req.SetMessageID(mid)
},
func(s sigType) {
req := s.(*PutRequest)
req.SetHeight(req.GetHeight() + 1)
},
func(s sigType) {
req := s.(*PutRequest)
amount := req.GetAmount()
if amount == nil {
req.SetAmount(decimal.New(0))
} else {
req.SetAmount(amount.Add(decimal.New(amount.GetValue())))
}
},
},
},
{ // ListRequest
constructor: func() sigType {
return new(ListRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*ListRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
},
},
{
constructor: func() sigType {
return new(DeleteRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*DeleteRequest)
id, err := NewChequeID()
require.NoError(t, err)
req.SetID(id)
},
func(s sigType) {
req := s.(*DeleteRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
func(s sigType) {
req := s.(*DeleteRequest)
mid := req.GetMessageID()
mid[0]++
req.SetMessageID(mid)
},
},
},
}
for _, item := range items {
{ // token corruptions
v := item.constructor()
token := new(service.Token)
v.SetToken(token)
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
corruption(v)
require.Error(t, service.VerifyRequestData(v))
}
}
}
}

@@ -1,453 +0,0 @@
package accounting
import (
"crypto/ecdsa"
"crypto/rand"
"encoding/binary"
"reflect"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neofs-api-go/chain"
"github.com/nspcc-dev/neofs-api-go/decimal"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/pkg/errors"
)
type (
// Cheque is a structure that describes a user request for withdrawal of funds.
Cheque struct {
ID ChequeID
Owner refs.OwnerID
Amount *decimal.Decimal
Height uint64
Signatures []ChequeSignature
}
// BalanceReceiver is an interface used to retrieve a user balance by address.
BalanceReceiver interface {
Balance(accountAddress string) (*Account, error)
}
// ChequeID is an identifier of a user request for withdrawal of funds.
ChequeID string
// CID type alias.
CID = refs.CID
// SGID type alias.
SGID = refs.SGID
// ChequeSignature contains public key and hash, and is used to verify signatures.
ChequeSignature struct {
Key *ecdsa.PublicKey
Hash []byte
}
)
const (
// ErrWrongSignature is raised when wrong signature is passed.
ErrWrongSignature = internal.Error("wrong signature")
// ErrWrongPublicKey is raised when wrong public key is passed.
ErrWrongPublicKey = internal.Error("wrong public key")
// ErrWrongChequeData is raised when passed bytes cannot be parsed as a valid Cheque.
ErrWrongChequeData = internal.Error("wrong cheque data")
// ErrInvalidLength is raised when passed bytes cannot be parsed as a valid ChequeID.
ErrInvalidLength = internal.Error("invalid length")
u16size = 2
u64size = 8
signaturesOffset = chain.AddressLength + refs.OwnerIDSize + u64size + u64size
)
// NewChequeID generates valid random ChequeID using crypto/rand.Reader.
func NewChequeID() (ChequeID, error) {
d := make([]byte, chain.AddressLength)
if _, err := rand.Read(d); err != nil {
return "", err
}
id := base58.Encode(d)
return ChequeID(id), nil
}
// String returns string representation of ChequeID.
func (b ChequeID) String() string { return string(b) }
// Empty returns true, if ChequeID is empty.
func (b ChequeID) Empty() bool { return len(b) == 0 }
// Valid validates ChequeID.
func (b ChequeID) Valid() bool {
d, err := base58.Decode(string(b))
return err == nil && len(d) == chain.AddressLength
}
// Bytes returns bytes representation of ChequeID.
func (b ChequeID) Bytes() []byte {
d, err := base58.Decode(string(b))
if err != nil {
return make([]byte, chain.AddressLength)
}
return d
}
// Equal checks that current ChequeID is equal to passed ChequeID.
func (b ChequeID) Equal(b2 ChequeID) bool {
return b.Valid() && b2.Valid() && string(b) == string(b2)
}
// Unmarshal tries to parse []byte into valid ChequeID.
func (b *ChequeID) Unmarshal(data []byte) error {
*b = ChequeID(base58.Encode(data))
if !b.Valid() {
return ErrInvalidLength
}
return nil
}
// Size returns size (chain.AddressLength).
func (b ChequeID) Size() int {
return chain.AddressLength
}
// MarshalTo tries to marshal ChequeID into the passed byte slice and returns
// the count of copied bytes, or an error if the slice is too short to contain ChequeID.
func (b ChequeID) MarshalTo(data []byte) (int, error) {
if len(data) < chain.AddressLength {
return 0, ErrInvalidLength
}
return copy(data, b.Bytes()), nil
}
// Equals checks that m and tx are valid and equal Tx values.
func (m Tx) Equals(tx Tx) bool {
return m.From == tx.From &&
m.To == tx.To &&
m.Type == tx.Type &&
m.Amount == tx.Amount
}
// Verify validates current Cheque and Signatures that are generated for current Cheque.
func (b Cheque) Verify() error {
data := b.marshalBody()
for i, sign := range b.Signatures {
if err := crypto.VerifyRFC6979(sign.Key, data, sign.Hash); err != nil {
return errors.Wrapf(ErrWrongSignature, "item #%d: %s", i, err.Error())
}
}
return nil
}
// Sign is used to sign current Cheque and stores result inside b.Signatures.
func (b *Cheque) Sign(key *ecdsa.PrivateKey) error {
hash, err := crypto.SignRFC6979(key, b.marshalBody())
if err != nil {
return err
}
b.Signatures = append(b.Signatures, ChequeSignature{
Key: &key.PublicKey,
Hash: hash,
})
return nil
}
func (b *Cheque) marshalBody() []byte {
buf := make([]byte, signaturesOffset)
var offset int
offset += copy(buf, b.ID.Bytes())
offset += copy(buf[offset:], b.Owner.Bytes())
binary.LittleEndian.PutUint64(buf[offset:], uint64(b.Amount.Value))
offset += u64size
binary.LittleEndian.PutUint64(buf[offset:], b.Height)
return buf
}
func (b *Cheque) unmarshalBody(buf []byte) error {
var offset int
if len(buf) < signaturesOffset {
return ErrWrongChequeData
}
{ // unmarshal UUID
if err := b.ID.Unmarshal(buf[offset : offset+chain.AddressLength]); err != nil {
return err
}
offset += chain.AddressLength
}
{ // unmarshal OwnerID
if err := b.Owner.Unmarshal(buf[offset : offset+refs.OwnerIDSize]); err != nil {
return err
}
offset += refs.OwnerIDSize
}
{ // unmarshal amount
amount := int64(binary.LittleEndian.Uint64(buf[offset:]))
b.Amount = decimal.New(amount)
offset += u64size
}
{ // unmarshal height
b.Height = binary.LittleEndian.Uint64(buf[offset:])
offset += u64size
}
return nil
}
// MarshalBinary is used to marshal Cheque into bytes.
func (b Cheque) MarshalBinary() ([]byte, error) {
var (
count = len(b.Signatures)
buf = make([]byte, b.Size())
offset = copy(buf, b.marshalBody())
)
binary.LittleEndian.PutUint16(buf[offset:], uint16(count))
offset += u16size
for _, sign := range b.Signatures {
key := crypto.MarshalPublicKey(sign.Key)
offset += copy(buf[offset:], key)
offset += copy(buf[offset:], sign.Hash)
}
return buf, nil
}
// Size returns the size of Cheque (the number of bytes needed to store it).
func (b Cheque) Size() int {
return signaturesOffset + u16size +
len(b.Signatures)*(crypto.PublicKeyCompressedSize+crypto.RFC6979SignatureSize)
}
// UnmarshalBinary tries to parse []byte into valid Cheque.
func (b *Cheque) UnmarshalBinary(buf []byte) error {
if err := b.unmarshalBody(buf); err != nil {
return err
}
body := buf[:signaturesOffset]
count := int64(binary.LittleEndian.Uint16(buf[signaturesOffset:]))
offset := signaturesOffset + u16size
if ln := count * int64(crypto.PublicKeyCompressedSize+crypto.RFC6979SignatureSize); ln > int64(len(buf[offset:])) {
return ErrWrongChequeData
}
for i := int64(0); i < count; i++ {
sign := ChequeSignature{
Key: crypto.UnmarshalPublicKey(buf[offset : offset+crypto.PublicKeyCompressedSize]),
Hash: make([]byte, crypto.RFC6979SignatureSize),
}
offset += crypto.PublicKeyCompressedSize
if sign.Key == nil {
return errors.Wrapf(ErrWrongPublicKey, "item #%d", i)
}
offset += copy(sign.Hash, buf[offset:offset+crypto.RFC6979SignatureSize])
if err := crypto.VerifyRFC6979(sign.Key, body, sign.Hash); err != nil {
return errors.Wrapf(ErrWrongSignature, "item #%d: %s (offset=%d, len=%d)", i, err.Error(), offset, len(sign.Hash))
}
b.Signatures = append(b.Signatures, sign)
}
return nil
}
// ErrNotEnoughFunds generates an error from the address and the requested and residual amounts.
func ErrNotEnoughFunds(addr string, needed, residue *decimal.Decimal) error {
return errors.Errorf("not enough funds (requested=%s, residue=%s, addr=%s)", needed, residue, addr)
}
func (m *Account) hasLockAcc(addr string) bool {
for i := range m.LockAccounts {
if m.LockAccounts[i].Address == addr {
return true
}
}
return false
}
// ValidateLock checks that account can be locked.
func (m *Account) ValidateLock() error {
switch {
case m.Address == "":
return ErrEmptyAddress
case m.ParentAddress == "":
return ErrEmptyParentAddress
case m.LockTarget == nil:
return ErrEmptyLockTarget
}
switch v := m.LockTarget.Target.(type) {
case *LockTarget_WithdrawTarget:
if v.WithdrawTarget.Cheque != m.Address {
return errors.Errorf("wrong cheque ID: expected %s, has %s", m.Address, v.WithdrawTarget.Cheque)
}
case *LockTarget_ContainerCreateTarget:
switch {
case v.ContainerCreateTarget.CID.Empty():
return ErrEmptyContainerID
}
}
return nil
}
// CanLock checks possibility to lock funds.
func (m *Account) CanLock(lockAcc *Account) error {
switch {
case m.ActiveFunds.LT(lockAcc.ActiveFunds):
return ErrNotEnoughFunds(lockAcc.ParentAddress, lockAcc.ActiveFunds, m.ActiveFunds)
case m.hasLockAcc(lockAcc.Address):
return errors.Errorf("could not lock account(%s) funds: duplicating lock(%s)", m.Address, lockAcc.Address)
default:
return nil
}
}
// LockForWithdraw checks that account contains locked funds by passed ChequeID.
func (m *Account) LockForWithdraw(chequeID string) bool {
switch v := m.LockTarget.Target.(type) {
case *LockTarget_WithdrawTarget:
return v.WithdrawTarget.Cheque == chequeID
}
return false
}
// LockForContainerCreate checks that account contains locked funds for container creation.
func (m *Account) LockForContainerCreate(cid refs.CID) bool {
switch v := m.LockTarget.Target.(type) {
case *LockTarget_ContainerCreateTarget:
return v.ContainerCreateTarget.CID.Equal(cid)
}
return false
}
// Equal checks that current Settlement is equal to passed Settlement.
func (m *Settlement) Equal(s *Settlement) bool {
if s == nil || m.Epoch != s.Epoch || len(m.Transactions) != len(s.Transactions) {
return false
}
return len(m.Transactions) == 0 || reflect.DeepEqual(m.Transactions, s.Transactions)
}
// GetOwnerID is an OwnerID field getter.
func (m BalanceRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *BalanceRequest) SetOwnerID(owner OwnerID) {
m.OwnerID = owner
}
// GetID is an ID field getter.
func (m GetRequest) GetID() ChequeID {
return m.ID
}
// SetID is an ID field setter.
func (m *GetRequest) SetID(id ChequeID) {
m.ID = id
}
// GetOwnerID is an OwnerID field getter.
func (m GetRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *GetRequest) SetOwnerID(id OwnerID) {
m.OwnerID = id
}
// GetOwnerID is an OwnerID field getter.
func (m PutRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *PutRequest) SetOwnerID(id OwnerID) {
m.OwnerID = id
}
// GetMessageID is a MessageID field getter.
func (m PutRequest) GetMessageID() MessageID {
return m.MessageID
}
// SetMessageID is a MessageID field setter.
func (m *PutRequest) SetMessageID(id MessageID) {
m.MessageID = id
}
// SetAmount is an Amount field setter.
func (m *PutRequest) SetAmount(amount *decimal.Decimal) {
m.Amount = amount
}
// SetHeight is a Height field setter.
func (m *PutRequest) SetHeight(h uint64) {
m.Height = h
}
// GetOwnerID is an OwnerID field getter.
func (m ListRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *ListRequest) SetOwnerID(id OwnerID) {
m.OwnerID = id
}
// GetID is an ID field getter.
func (m DeleteRequest) GetID() ChequeID {
return m.ID
}
// SetID is an ID field setter.
func (m *DeleteRequest) SetID(id ChequeID) {
m.ID = id
}
// GetOwnerID is an OwnerID field getter.
func (m DeleteRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *DeleteRequest) SetOwnerID(id OwnerID) {
m.OwnerID = id
}
// GetMessageID is a MessageID field getter.
func (m DeleteRequest) GetMessageID() MessageID {
return m.MessageID
}
// SetMessageID is a MessageID field setter.
func (m *DeleteRequest) SetMessageID(id MessageID) {
m.MessageID = id
}
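
An end-to-end sketch of issuing and round-tripping a cheque with the types above (not part of the original diff; the key, amount, and height are arbitrary placeholders, and the cheque test later in this diff covers the same path against a fixture):

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "log"

    "github.com/nspcc-dev/neofs-api-go/accounting"
    "github.com/nspcc-dev/neofs-api-go/decimal"
    "github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    owner, err := refs.NewOwnerID(&key.PublicKey)
    if err != nil {
        log.Fatal(err)
    }

    id, err := accounting.NewChequeID()
    if err != nil {
        log.Fatal(err)
    }

    cheque := &accounting.Cheque{
        ID:     id,
        Owner:  owner,
        Amount: decimal.NewGAS(42),
        Height: 100,
    }
    if err := cheque.Sign(key); err != nil { // appends a ChequeSignature
        log.Fatal(err)
    }

    // marshalBody layout (ID, Owner, Amount, Height) followed by a uint16
    // signature count and the compressed public key plus RFC 6979 signature of each entry.
    data, err := cheque.MarshalBinary()
    if err != nil {
        log.Fatal(err)
    }

    restored := new(accounting.Cheque)
    if err := restored.UnmarshalBinary(data); err != nil { // re-verifies every signature
        log.Fatal(err)
    }
    log.Println("cheque round-tripped,", len(data), "bytes")
}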

File diff suppressed because it is too large.

@@ -1,125 +0,0 @@
syntax = "proto3";
package accounting;
option go_package = "github.com/nspcc-dev/neofs-api-go/accounting";
option csharp_namespace = "NeoFS.API.Accounting";
import "decimal/decimal.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
message Account {
// OwnerID is a wallet address
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// Address is identifier of accounting record
string Address = 2;
// ParentAddress is identifier of parent accounting record
string ParentAddress = 3;
// ActiveFunds is amount of active (non locked) funds for account
decimal.Decimal ActiveFunds = 4;
// Lifetime is time until account is valid (used for lock accounts)
Lifetime Lifetime = 5 [(gogoproto.nullable) = false];
// LockTarget is the purpose of lock funds (it might be withdraw or payment for storage)
LockTarget LockTarget = 6;
// LockAccounts contains child accounts with locked funds
repeated Account LockAccounts = 7;
}
// LockTarget must be one of two options
message LockTarget {
oneof Target {
// WithdrawTarget used when user requested withdraw
WithdrawTarget WithdrawTarget = 1;
// ContainerCreateTarget used when user requested creation of container
ContainerCreateTarget ContainerCreateTarget = 2;
}
}
message Balances {
// Accounts contains multiple account snapshots
repeated Account Accounts = 1 [(gogoproto.nullable) = false];
}
message PayIO {
// BlockID contains id of the NEO block where withdraw or deposit
// call was invoked
uint64 BlockID = 1;
// Transactions contains all transactions that were found in the block
// and used for PayIO
repeated Tx Transactions = 2 [(gogoproto.nullable) = false];
}
message Lifetime {
// Unit can be Unlimited, based on NeoFS epoch or Neo block
enum Unit {
Unlimited = 0;
NeoFSEpoch = 1;
NeoBlock = 2;
}
// Unit describes how lifetime is measured in account
Unit unit = 1 [(gogoproto.customname) = "Unit"];
// Value describes how long lifetime will be valid
int64 Value = 2;
}
message Tx {
// Type can be withdrawal, payIO or inner
enum Type {
Unknown = 0;
Withdraw = 1;
PayIO = 2;
Inner = 3;
}
// Type describes target of transaction
Type type = 1 [(gogoproto.customname) = "Type"];
// From describes sender of funds
string From = 2;
// To describes receiver of funds
string To = 3;
// Amount describes amount of funds
decimal.Decimal Amount = 4;
// PublicKeys contains public key of sender
bytes PublicKeys = 5;
}
message Settlement {
message Receiver {
// To is the address of funds recipient
string To = 1;
// Amount is the amount of funds that will be sent
decimal.Decimal Amount = 2;
}
message Container {
// CID is container identifier
bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
// SGIDs is a set of storage groups that successfully passed the audit
repeated bytes SGIDs = 2 [(gogoproto.customtype) = "SGID", (gogoproto.nullable) = false];
}
message Tx {
// From is the address of the sender of funds
string From = 1;
// Container that successfully had passed the audit
Container Container = 2 [(gogoproto.nullable) = false];
// Receivers is a set of addresses of funds recipients
repeated Receiver Receivers = 3 [(gogoproto.nullable) = false];
}
// Epoch contains an epoch when settlement was accepted
uint64 Epoch = 1;
// Transactions is a set of transactions
repeated Tx Transactions = 2;
}
message ContainerCreateTarget {
// CID is container identifier
bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message WithdrawTarget {
// Cheque is a string representation of cheque id
string Cheque = 1;
}

@@ -1,193 +0,0 @@
package accounting
import (
"io/ioutil"
"testing"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neofs-api-go/chain"
"github.com/nspcc-dev/neofs-api-go/decimal"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestCheque(t *testing.T) {
t.Run("new/valid", func(t *testing.T) {
id, err := NewChequeID()
require.NoError(t, err)
require.True(t, id.Valid())
d := make([]byte, chain.AddressLength+1)
// expected size + 1 byte
str := base58.Encode(d)
require.False(t, ChequeID(str).Valid())
// expected size - 1 byte
str = base58.Encode(d[:len(d)-2])
require.False(t, ChequeID(str).Valid())
// wrong encoding
d = d[:len(d)-1] // normal size
require.False(t, ChequeID(string(d)).Valid())
})
t.Run("marshal/unmarshal", func(t *testing.T) {
b2 := new(Cheque)
key1 := test.DecodeKey(0)
key2 := test.DecodeKey(1)
id, err := NewChequeID()
require.NoError(t, err)
owner, err := refs.NewOwnerID(&key1.PublicKey)
require.NoError(t, err)
b1 := &Cheque{
ID: id,
Owner: owner,
Height: 100,
Amount: decimal.NewGAS(100),
}
require.NoError(t, b1.Sign(key1))
require.NoError(t, b1.Sign(key2))
data, err := b1.MarshalBinary()
require.NoError(t, err)
require.Len(t, data, b1.Size())
require.NoError(t, b2.UnmarshalBinary(data))
require.Equal(t, b1, b2)
require.NoError(t, b1.Verify())
require.NoError(t, b2.Verify())
})
t.Run("example from SC", func(t *testing.T) {
pathToCheque := "fixtures/cheque_data"
expect, err := ioutil.ReadFile(pathToCheque)
require.NoError(t, err)
var cheque Cheque
require.NoError(t, cheque.UnmarshalBinary(expect))
actual, err := cheque.MarshalBinary()
require.NoError(t, err)
require.Equal(t, expect, actual)
require.NoError(t, cheque.Verify())
require.Equal(t, cheque.Height, uint64(7777))
require.Equal(t, cheque.Amount, decimal.NewGAS(42))
})
}
func TestBalanceRequest_SetOwnerID(t *testing.T) {
ownerID := OwnerID{1, 2, 3}
m := new(BalanceRequest)
m.SetOwnerID(ownerID)
require.Equal(t, ownerID, m.GetOwnerID())
}
func TestGetRequestGettersSetters(t *testing.T) {
t.Run("id", func(t *testing.T) {
id := ChequeID("test id")
m := new(GetRequest)
m.SetID(id)
require.Equal(t, id, m.GetID())
})
t.Run("owner", func(t *testing.T) {
id := OwnerID{1, 2, 3}
m := new(GetRequest)
m.SetOwnerID(id)
require.Equal(t, id, m.GetOwnerID())
})
}
func TestPutRequestGettersSetters(t *testing.T) {
t.Run("owner", func(t *testing.T) {
id := OwnerID{1, 2, 3}
m := new(PutRequest)
m.SetOwnerID(id)
require.Equal(t, id, m.GetOwnerID())
})
t.Run("message ID", func(t *testing.T) {
id, err := refs.NewUUID()
require.NoError(t, err)
m := new(PutRequest)
m.SetMessageID(id)
require.Equal(t, id, m.GetMessageID())
})
t.Run("amount", func(t *testing.T) {
amount := decimal.New(1)
m := new(PutRequest)
m.SetAmount(amount)
require.Equal(t, amount, m.GetAmount())
})
t.Run("height", func(t *testing.T) {
h := uint64(3)
m := new(PutRequest)
m.SetHeight(h)
require.Equal(t, h, m.GetHeight())
})
}
func TestListRequestGettersSetters(t *testing.T) {
ownerID := OwnerID{1, 2, 3}
m := new(ListRequest)
m.SetOwnerID(ownerID)
require.Equal(t, ownerID, m.GetOwnerID())
}
func TestDeleteRequestGettersSetters(t *testing.T) {
t.Run("id", func(t *testing.T) {
id := ChequeID("test id")
m := new(DeleteRequest)
m.SetID(id)
require.Equal(t, id, m.GetID())
})
t.Run("owner", func(t *testing.T) {
id := OwnerID{1, 2, 3}
m := new(DeleteRequest)
m.SetOwnerID(id)
require.Equal(t, id, m.GetOwnerID())
})
t.Run("message ID", func(t *testing.T) {
id, err := refs.NewUUID()
require.NoError(t, err)
m := new(DeleteRequest)
m.SetMessageID(id)
require.Equal(t, id, m.GetMessageID())
})
}

@@ -1,35 +0,0 @@
package accounting
import (
"encoding/binary"
"github.com/nspcc-dev/neofs-api-go/refs"
)
type (
// MessageID type alias.
MessageID = refs.MessageID
)
// PrepareData prepares bytes representation of PutRequest to satisfy SignedRequest interface.
func (m *PutRequest) PrepareData() ([]byte, error) {
var offset int
// MessageID-len + OwnerID-len + Amount + Height
buf := make([]byte, refs.UUIDSize+refs.OwnerIDSize+binary.MaxVarintLen64+binary.MaxVarintLen64)
offset += copy(buf[offset:], m.MessageID.Bytes())
offset += copy(buf[offset:], m.OwnerID.Bytes())
offset += binary.PutVarint(buf[offset:], m.Amount.Value)
binary.PutUvarint(buf[offset:], m.Height)
return buf, nil
}
// PrepareData prepares bytes representation of DeleteRequest to satisfy SignedRequest interface.
func (m *DeleteRequest) PrepareData() ([]byte, error) {
var offset int
// ID-len + OwnerID-len + MessageID-len
buf := make([]byte, refs.UUIDSize+refs.OwnerIDSize+refs.UUIDSize)
offset += copy(buf[offset:], m.ID.Bytes())
offset += copy(buf[offset:], m.OwnerID.Bytes())
copy(buf[offset:], m.MessageID.Bytes())
return buf, nil
}

File diff suppressed because it is too large.

@@ -1,101 +0,0 @@
syntax = "proto3";
package accounting;
option go_package = "github.com/nspcc-dev/neofs-api-go/accounting";
option csharp_namespace = "NeoFS.API.Accounting";
import "service/meta.proto";
import "service/verify.proto";
import "decimal/decimal.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Withdraw is a service that provides asset withdrawal operations from NeoFS
service Withdraw {
// Get returns the cheque if it was signed by inner ring nodes
rpc Get(GetRequest) returns (GetResponse);
// Put asks inner ring nodes to sign a cheque for a withdraw invocation
rpc Put(PutRequest) returns (PutResponse);
// List returns all of the user's cheques
rpc List(ListRequest) returns (ListResponse);
// Delete allows the user to remove an unused cheque
rpc Delete(DeleteRequest) returns (DeleteResponse);
}
message Item {
// ID is a cheque identifier
bytes ID = 1 [(gogoproto.customtype) = "ChequeID", (gogoproto.nullable) = false];
// OwnerID is a wallet address
bytes OwnerID = 2 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// Amount of funds
decimal.Decimal Amount = 3;
// Height is the neo blockchain height until the cheque is valid
uint64 Height = 4;
// Payload contains cheque representation in bytes
bytes Payload = 5;
}
message GetRequest {
// ID is cheque identifier
bytes ID = 1 [(gogoproto.customtype) = "ChequeID", (gogoproto.nullable) = false];
// OwnerID is a wallet address
bytes OwnerID = 2 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetResponse {
// Withdraw is cheque with meta information
Item Withdraw = 1;
}
message PutRequest {
// OwnerID is a wallet address
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// Amount of funds
decimal.Decimal Amount = 2;
// Height is the neo blockchain height until the cheque is valid
uint64 Height = 3;
// MessageID is a nonce for a unique request (UUIDv4)
bytes MessageID = 4 [(gogoproto.customtype) = "MessageID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message PutResponse {
// ID is cheque identifier
bytes ID = 1 [(gogoproto.customtype) = "ChequeID", (gogoproto.nullable) = false];
}
message ListRequest {
// OwnerID is a wallet address
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message ListResponse {
// Items is a set of cheques with meta information
repeated Item Items = 1;
}
message DeleteRequest {
// ID is cheque identifier
bytes ID = 1 [(gogoproto.customtype) = "ChequeID", (gogoproto.nullable) = false];
// OwnerID is a wallet address
bytes OwnerID = 2 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// MessageID is a nonce for a unique request (UUIDv4)
bytes MessageID = 3 [(gogoproto.customtype) = "MessageID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
// DeleteResponse is empty
message DeleteResponse {}

@@ -1,126 +0,0 @@
package eacl
const (
// MatchUnknown is a MatchType value used to mark value as undefined.
// Most of the tools consider MatchUnknown as incalculable.
// Using MatchUnknown in HeaderFilter is unsafe.
MatchUnknown MatchType = iota
// StringEqual is a MatchType of string equality.
StringEqual
// StringNotEqual is a MatchType of string inequality.
StringNotEqual
)
const (
// ActionUnknown is Action used to mark value as undefined.
// Most of the tools consider ActionUnknown as incalculable.
// Using ActionUnknown in Record is unsafe.
ActionUnknown Action = iota
// ActionAllow is Action used to mark an applicability of ACL rule.
ActionAllow
// ActionDeny is Action used to mark an inapplicability of ACL rule.
ActionDeny
)
const (
// GroupUnknown is a Group value used to mark value as undefined.
// Most of the tools consider GroupUnknown as incalculable.
// Using GroupUnknown in Target is unsafe.
GroupUnknown Group = iota
// GroupUser is a Group value for User access group.
GroupUser
// GroupSystem is a Group value for System access group.
GroupSystem
// GroupOthers is a Group value for Others access group.
GroupOthers
)
const (
// HdrTypeUnknown is a HeaderType value used to mark value as undefined.
// Most of the tools consider HdrTypeUnknown as incalculable.
// Using HdrTypeUnknown in HeaderFilter is unsafe.
HdrTypeUnknown HeaderType = iota
// HdrTypeRequest is a HeaderType for request header.
HdrTypeRequest
// HdrTypeObjSys is a HeaderType for system headers of object.
HdrTypeObjSys
// HdrTypeObjUsr is a HeaderType for user headers of object.
HdrTypeObjUsr
)
const (
// OpTypeUnknown is a OperationType value used to mark value as undefined.
// Most of the tools consider OpTypeUnknown as incalculable.
// Using OpTypeUnknown in Record is unsafe.
OpTypeUnknown OperationType = iota
// OpTypeGet is an OperationType for object.Get RPC
OpTypeGet
// OpTypePut is an OperationType for object.Put RPC
OpTypePut
// OpTypeHead is an OperationType for object.Head RPC
OpTypeHead
// OpTypeSearch is an OperationType for object.Search RPC
OpTypeSearch
// OpTypeDelete is an OperationType for object.Delete RPC
OpTypeDelete
// OpTypeRange is an OperationType for object.GetRange RPC
OpTypeRange
// OpTypeRangeHash is an OperationType for object.GetRangeHash RPC
OpTypeRangeHash
)
const (
// HdrObjSysNameID is a name of ID field in system header of object.
HdrObjSysNameID = "ID"
// HdrObjSysNameCID is a name of CID field in system header of object.
HdrObjSysNameCID = "CID"
// HdrObjSysNameOwnerID is a name of OwnerID field in system header of object.
HdrObjSysNameOwnerID = "OWNER_ID"
// HdrObjSysNameVersion is a name of Version field in system header of object.
HdrObjSysNameVersion = "VERSION"
// HdrObjSysNamePayloadLength is a name of PayloadLength field in system header of object.
HdrObjSysNamePayloadLength = "PAYLOAD_LENGTH"
// HdrObjSysNameCreatedUnix is a name of CreatedAt.UnitTime field in system header of object.
HdrObjSysNameCreatedUnix = "CREATED_UNIX"
// HdrObjSysNameCreatedEpoch is a name of CreatedAt.Epoch field in system header of object.
HdrObjSysNameCreatedEpoch = "CREATED_EPOCH"
// HdrObjSysLinkPrev is a name of previous link header in extended headers of object.
HdrObjSysLinkPrev = "LINK_PREV"
// HdrObjSysLinkNext is a name of next link header in extended headers of object.
HdrObjSysLinkNext = "LINK_NEXT"
// HdrObjSysLinkChild is a name of child link header in extended headers of object.
HdrObjSysLinkChild = "LINK_CHILD"
// HdrObjSysLinkPar is a name of parent link header in extended headers of object.
HdrObjSysLinkPar = "LINK_PAR"
// HdrObjSysLinkSG is a name of storage group link header in extended headers of object.
HdrObjSysLinkSG = "LINK_SG"
)
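The constants above are combined when constructing a header filter: a header type selects where the header lives, a match type selects the comparison, and the HdrObjSysName* names identify system headers of an object. Below is an illustrative sketch, not part of the removed sources, written as if inside the eacl package and assuming the FilterWrapper API defined later in this commit; the container ID value is a hypothetical placeholder.
// illustrative sketch, assuming the v1 eacl FilterWrapper API removed in this commit
f := WrapFilterInfo(nil)
f.SetHeaderType(HdrTypeObjSys) // filter on an object system header
f.SetMatchType(StringEqual)    // exact string comparison
f.SetName(HdrObjSysNameCID)    // the "CID" system header
f.SetValue("<container-id>")   // hypothetical container ID value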

View file

@ -1,333 +0,0 @@
package eacl
import (
"encoding/binary"
"github.com/pkg/errors"
)
const (
sliceLenSize = 2 // uint16 for len()
actionSize = 4 // uint32
opTypeSize = 4 // uint32
hdrTypeSize = 4 // uint32
matchTypeSize = 4 // uint32
targetSize = 4 // uint32
)
// MarshalTable encodes Table into a
// binary form and returns the result.
//
// If table is nil, empty slice is returned.
func MarshalTable(table Table) []byte {
if table == nil {
return make([]byte, 0)
}
// allocate buffer
buf := make([]byte, tableBinSize(table))
records := table.Records()
// write record number
binary.BigEndian.PutUint16(buf, uint16(len(records)))
off := sliceLenSize
// write all records
for _, record := range records {
// write action
binary.BigEndian.PutUint32(buf[off:], uint32(record.Action()))
off += actionSize
// write operation type
binary.BigEndian.PutUint32(buf[off:], uint32(record.OperationType()))
off += opTypeSize
filters := record.HeaderFilters()
// write filter number
binary.BigEndian.PutUint16(buf[off:], uint16(len(filters)))
off += sliceLenSize
// write all filters
for _, filter := range filters {
// write header type
binary.BigEndian.PutUint32(buf[off:], uint32(filter.HeaderType()))
off += hdrTypeSize
// write match type
binary.BigEndian.PutUint32(buf[off:], uint32(filter.MatchType()))
off += matchTypeSize
// write header name size
name := []byte(filter.Name())
binary.BigEndian.PutUint16(buf[off:], uint16(len(name)))
off += sliceLenSize
// write header name bytes
off += copy(buf[off:], name)
// write header value size
val := []byte(filter.Value())
binary.BigEndian.PutUint16(buf[off:], uint16(len(val)))
off += sliceLenSize
// write header value bytes
off += copy(buf[off:], val)
}
targets := record.TargetList()
// write target number
binary.BigEndian.PutUint16(buf[off:], uint16(len(targets)))
off += sliceLenSize
// write all targets
for _, target := range targets {
// write target group
binary.BigEndian.PutUint32(buf[off:], uint32(target.Group()))
off += targetSize
keys := target.KeyList()
// write key number
binary.BigEndian.PutUint16(buf[off:], uint16(len(keys)))
off += sliceLenSize
// write keys
for i := range keys {
// write key size
binary.BigEndian.PutUint16(buf[off:], uint16(len(keys[i])))
off += sliceLenSize
// write key bytes
off += copy(buf[off:], keys[i])
}
}
}
return buf
}
// tableBinSize returns the size of Table in a binary format.
func tableBinSize(table Table) (sz int) {
sz = sliceLenSize // number of records
records := table.Records()
ln := len(records)
sz += ln * actionSize // action type of each record
sz += ln * opTypeSize // operation type of each record
for _, record := range records {
sz += sliceLenSize // number of filters
filters := record.HeaderFilters()
ln := len(filters)
sz += ln * hdrTypeSize // header type of each filter
sz += ln * matchTypeSize // match type of each filter
for _, filter := range filters {
sz += sliceLenSize // header name size
sz += len(filter.Name()) // header name bytes
sz += sliceLenSize // header value size
sz += len(filter.Value()) // header value bytes
}
sz += sliceLenSize // number of targets
targets := record.TargetList()
ln = len(targets)
sz += ln * targetSize // target group of each target
for _, target := range targets {
sz += sliceLenSize // number of keys
for _, key := range target.KeyList() {
sz += sliceLenSize // key size
sz += len(key) // key bytes
}
}
}
return
}
// UnmarshalTable unmarshals Table from
// a binary representation.
//
// If data is empty, a table without records is returned.
func UnmarshalTable(data []byte) (Table, error) {
table := WrapTable(nil)
if len(data) == 0 {
return table, nil
}
// decode record number
if len(data) < sliceLenSize {
return nil, errors.New("could not decode record number")
}
recordNum := binary.BigEndian.Uint16(data)
records := make([]Record, 0, recordNum)
off := sliceLenSize
// decode all records one by one
for i := uint16(0); i < recordNum; i++ {
record := WrapRecord(nil)
// decode action
if len(data[off:]) < actionSize {
return nil, errors.Errorf("could not decode action of record #%d", i)
}
record.SetAction(Action(binary.BigEndian.Uint32(data[off:])))
off += actionSize
// decode operation type
if len(data[off:]) < opTypeSize {
return nil, errors.Errorf("could not decode operation type of record #%d", i)
}
record.SetOperationType(OperationType(binary.BigEndian.Uint32(data[off:])))
off += opTypeSize
// decode filter number
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode filter number of record #%d", i)
}
filterNum := binary.BigEndian.Uint16(data[off:])
off += sliceLenSize
filters := make([]HeaderFilter, 0, filterNum)
// decode filters one by one
for j := uint16(0); j < filterNum; j++ {
filter := WrapFilterInfo(nil)
// decode header type
if len(data[off:]) < hdrTypeSize {
return nil, errors.Errorf("could not decode header type of filter #%d of record #%d", j, i)
}
filter.SetHeaderType(HeaderType(binary.BigEndian.Uint32(data[off:])))
off += hdrTypeSize
// decode match type
if len(data[off:]) < matchTypeSize {
return nil, errors.Errorf("could not decode match type of filter #%d of record #%d", j, i)
}
filter.SetMatchType(MatchType(binary.BigEndian.Uint32(data[off:])))
off += matchTypeSize
// decode header name size
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode header name size of filter #%d of record #%d", j, i)
}
hdrNameSize := int(binary.BigEndian.Uint16(data[off:]))
off += sliceLenSize
// decode header name
if len(data[off:]) < hdrNameSize {
return nil, errors.Errorf("could not decode header name of filter #%d of record #%d", j, i)
}
filter.SetName(string(data[off : off+hdrNameSize]))
off += hdrNameSize
// decode header value size
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode header value size of filter #%d of record #%d", j, i)
}
hdrValSize := int(binary.BigEndian.Uint16(data[off:]))
off += sliceLenSize
// decode header value
if len(data[off:]) < hdrValSize {
return nil, errors.Errorf("could not decode header value of filter #%d of record #%d", j, i)
}
filter.SetValue(string(data[off : off+hdrValSize]))
off += hdrValSize
filters = append(filters, filter)
}
record.SetHeaderFilters(filters)
// decode target number
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode target number of record #%d", i)
}
targetNum := int(binary.BigEndian.Uint16(data[off:]))
off += sliceLenSize
targets := make([]Target, 0, targetNum)
// decode targets one by one
for j := 0; j < targetNum; j++ {
target := WrapTarget(nil)
// decode target group
if len(data[off:]) < targetSize {
return nil, errors.Errorf("could not decode target group of target #%d of record #%d", j, i)
}
target.SetGroup(Group(binary.BigEndian.Uint32(data[off:])))
off += targetSize
// decode key number
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode key number of target #%d of record #%d", j, i)
}
keyNum := int(binary.BigEndian.Uint16(data[off:]))
off += sliceLenSize
keys := make([][]byte, 0, keyNum)
for k := 0; k < keyNum; k++ {
// decode key size
if len(data[off:]) < sliceLenSize {
return nil, errors.Errorf("could not decode size of key #%d target #%d of record #%d", k, j, i)
}
keySz := int(binary.BigEndian.Uint16(data[off:]))
off += sliceLenSize
// decode key
if len(data[off:]) < keySz {
return nil, errors.Errorf("could not decode key #%d target #%d of record #%d", k, j, i)
}
key := make([]byte, keySz)
off += copy(key, data[off:off+keySz])
keys = append(keys, key)
}
target.SetKeyList(keys)
targets = append(targets, target)
}
record.SetTargetList(targets)
records = append(records, record)
}
table.SetRecords(records)
return table, nil
}
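For reference, the layout produced by MarshalTable is: a big-endian uint16 record count, then for each record a uint32 action, a uint32 operation type, a uint16-prefixed filter list (each filter: uint32 header type, uint32 match type, uint16-length-prefixed name and value) and a uint16-prefixed target list (each target: uint32 group and uint16-length-prefixed keys). A minimal round-trip sketch, not part of the removed sources and written as if inside this package, using the wrapper constructors from this commit:
// illustrative sketch: a single record with no filters or targets
r := WrapRecord(nil)
r.SetAction(ActionAllow)
r.SetOperationType(OpTypeGet)

t := WrapTable(nil)
t.SetRecords([]Record{r})

data := MarshalTable(t) // 2 + 4 + 4 + 2 + 2 = 14 bytes for this table

t2, err := UnmarshalTable(data)
if err != nil {
	// handle decoding error
}
_ = t2.Records() // one record with ActionAllow and OpTypeGet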

View file

@ -1,73 +0,0 @@
package eacl
// OperationType is an enumeration of operation types for extended ACL.
type OperationType uint32
// HeaderType is an enumeration of header types for extended ACL.
type HeaderType uint32
// MatchType is an enumeration of match types for extended ACL.
type MatchType uint32
// Action is an enumeration of extended ACL actions.
type Action uint32
// Group is an enumeration of access groups.
type Group uint32
// Header is an interface of a string key-value pair.
type Header interface {
// Must return string identifier of header.
Name() string
// Must return string value of header.
Value() string
}
// TypedHeader is an interface of Header and HeaderType pair.
type TypedHeader interface {
Header
// Must return type of filtered header.
HeaderType() HeaderType
}
// HeaderFilter is an interface of grouped information about filtered header.
type HeaderFilter interface {
// Must return match type of filter.
MatchType() MatchType
TypedHeader
}
// Target is an interface of grouped information about extended ACL rule target.
type Target interface {
// Must return ACL target type.
Group() Group
// Must return public key list of ACL targets.
KeyList() [][]byte
}
// Record is an interface of record of extended ACL rule table.
type Record interface {
// Must return operation type of extended ACL rule.
OperationType() OperationType
// Must return list of header filters of extended ACL rule.
HeaderFilters() []HeaderFilter
// Must return target list of extended ACL rule.
TargetList() []Target
// Must return action of extended ACL rule.
Action() Action
}
// Table is an interface of extended ACL table.
type Table interface {
// Must return list of extended ACL rules.
Records() []Record
}
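These interfaces are satisfied by the wrappers in the next file, but any type can implement them; for example, a request header used for filtering only needs Name, Value and HeaderType. A minimal sketch, not part of the removed sources, written as if inside the eacl package:
// illustrative sketch of a user-defined TypedHeader
type requestHeader struct {
	name, value string
}

func (h requestHeader) Name() string           { return h.name }
func (h requestHeader) Value() string          { return h.value }
func (h requestHeader) HeaderType() HeaderType { return HdrTypeRequest }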

View file

@ -1,528 +0,0 @@
package eacl
import (
"github.com/nspcc-dev/neofs-api-go/acl"
)
// FilterWrapper is a wrapper over acl.EACLRecord_FilterInfo pointer.
type FilterWrapper struct {
filter *acl.EACLRecord_FilterInfo
}
// TargetWrapper is a wrapper over acl.EACLRecord_TargetInfo pointer.
type TargetWrapper struct {
target *acl.EACLRecord_TargetInfo
}
// RecordWrapper is a wrapper over acl.EACLRecord pointer.
type RecordWrapper struct {
record *acl.EACLRecord
}
// TableWrapper is a wrapper over acl.EACLTable pointer.
type TableWrapper struct {
table *acl.EACLTable
}
// WrapFilterInfo wraps EACLRecord_FilterInfo pointer.
//
// If argument is nil, new EACLRecord_FilterInfo is initialized.
func WrapFilterInfo(v *acl.EACLRecord_FilterInfo) FilterWrapper {
if v == nil {
v = new(acl.EACLRecord_FilterInfo)
}
return FilterWrapper{
filter: v,
}
}
// WrapTarget wraps EACLRecord_TargetInfo pointer.
//
// If argument is nil, new EACLRecord_TargetInfo is initialized.
func WrapTarget(v *acl.EACLRecord_TargetInfo) TargetWrapper {
if v == nil {
v = new(acl.EACLRecord_TargetInfo)
}
return TargetWrapper{
target: v,
}
}
// WrapRecord wraps EACLRecord pointer.
//
// If argument is nil, new EACLRecord is initialized.
func WrapRecord(v *acl.EACLRecord) RecordWrapper {
if v == nil {
v = new(acl.EACLRecord)
}
return RecordWrapper{
record: v,
}
}
// WrapTable wraps EACLTable pointer.
//
// If argument is nil, new EACLTable is initialized.
func WrapTable(v *acl.EACLTable) TableWrapper {
if v == nil {
v = new(acl.EACLTable)
}
return TableWrapper{
table: v,
}
}
// MatchType returns the match type of the filter.
//
// If the filter is not initialized, 0 is returned.
//
// Returns 0 if MatchType is not one of:
// - EACLRecord_FilterInfo_StringEqual;
// - EACLRecord_FilterInfo_StringNotEqual.
func (s FilterWrapper) MatchType() (res MatchType) {
if s.filter != nil {
switch s.filter.GetMatchType() {
case acl.EACLRecord_FilterInfo_StringEqual:
res = StringEqual
case acl.EACLRecord_FilterInfo_StringNotEqual:
res = StringNotEqual
}
}
return
}
// SetMatchType sets the match type of the filter.
//
// If filter is not initialized, nothing changes.
//
// MatchType is set to EACLRecord_FilterInfo_MatchUnknown if argument is not one of:
// - StringEqual;
// - StringNotEqual.
func (s FilterWrapper) SetMatchType(v MatchType) {
if s.filter != nil {
switch v {
case StringEqual:
s.filter.SetMatchType(acl.EACLRecord_FilterInfo_StringEqual)
case StringNotEqual:
s.filter.SetMatchType(acl.EACLRecord_FilterInfo_StringNotEqual)
default:
s.filter.SetMatchType(acl.EACLRecord_FilterInfo_MatchUnknown)
}
}
}
// Name returns the name of filtering header.
//
// If the filter is not initialized, an empty string is returned.
func (s FilterWrapper) Name() string {
if s.filter == nil {
return ""
}
return s.filter.GetHeaderName()
}
// SetName sets the name of the filtering header.
//
// If filter is not initialized, nothing changes.
func (s FilterWrapper) SetName(v string) {
if s.filter != nil {
s.filter.SetHeaderName(v)
}
}
// Value returns the value of filtering header.
//
// If the filter is not initialized, an empty string is returned.
func (s FilterWrapper) Value() string {
if s.filter == nil {
return ""
}
return s.filter.GetHeaderVal()
}
// SetValue sets the value of filtering header.
//
// If filter is not initialized, nothing changes.
func (s FilterWrapper) SetValue(v string) {
if s.filter != nil {
s.filter.SetHeaderVal(v)
}
}
// HeaderType returns the header type of the filter.
//
// If the filter is not initialized, HdrTypeUnknown is returned.
//
// Returns HdrTypeUnknown if Header is not one of:
// - EACLRecord_FilterInfo_Request;
// - EACLRecord_FilterInfo_ObjectSystem;
// - EACLRecord_FilterInfo_ObjectUser.
func (s FilterWrapper) HeaderType() (res HeaderType) {
res = HdrTypeUnknown
if s.filter != nil {
switch s.filter.GetHeader() {
case acl.EACLRecord_FilterInfo_Request:
res = HdrTypeRequest
case acl.EACLRecord_FilterInfo_ObjectSystem:
res = HdrTypeObjSys
case acl.EACLRecord_FilterInfo_ObjectUser:
res = HdrTypeObjUsr
}
}
return
}
// SetHeaderType sets the header type of the filter.
//
// If filter is not initialized, nothing changes.
//
// Header is set to EACLRecord_FilterInfo_HeaderUnknown if argument is not one of:
// - HdrTypeRequest;
// - HdrTypeObjSys;
// - HdrTypeObjUsr.
func (s FilterWrapper) SetHeaderType(t HeaderType) {
if s.filter != nil {
switch t {
case HdrTypeRequest:
s.filter.SetHeader(acl.EACLRecord_FilterInfo_Request)
case HdrTypeObjSys:
s.filter.SetHeader(acl.EACLRecord_FilterInfo_ObjectSystem)
case HdrTypeObjUsr:
s.filter.SetHeader(acl.EACLRecord_FilterInfo_ObjectUser)
default:
s.filter.SetHeader(acl.EACLRecord_FilterInfo_HeaderUnknown)
}
}
}
// Group returns the access group of the target.
//
// If the target is not initialized, GroupUnknown is returned.
//
// Returns GroupUnknown if Target is not one of:
// - Target_User;
// - Target_System;
// - Target_Others.
func (s TargetWrapper) Group() (res Group) {
res = GroupUnknown
if s.target != nil {
switch s.target.GetTarget() {
case acl.Target_User:
res = GroupUser
case acl.Target_System:
res = GroupSystem
case acl.Target_Others:
res = GroupOthers
}
}
return
}
// SetGroup sets the access group of the target.
//
// If target is not initialized, nothing changes.
//
// Target is set to Target_Unknown if argument is not one of:
// - GroupUser;
// - GroupSystem;
// - GroupOthers.
func (s TargetWrapper) SetGroup(g Group) {
if s.target != nil {
switch g {
case GroupUser:
s.target.SetTarget(acl.Target_User)
case GroupSystem:
s.target.SetTarget(acl.Target_System)
case GroupOthers:
s.target.SetTarget(acl.Target_Others)
default:
s.target.SetTarget(acl.Target_Unknown)
}
}
}
// KeyList returns the key list of the target.
//
// If the target is not initialized, nil is returned.
func (s TargetWrapper) KeyList() [][]byte {
if s.target == nil {
return nil
}
return s.target.GetKeyList()
}
// SetKeyList sets the key list of the target.
//
// If target is not initialized, nothing changes.
func (s TargetWrapper) SetKeyList(v [][]byte) {
if s.target != nil {
s.target.SetKeyList(v)
}
}
// OperationType returns the operation type of the record.
//
// If the record is not initialized, OpTypeUnknown is returned.
//
// Returns OpTypeUnknown if Operation is not one of:
// - EACLRecord_HEAD;
// - EACLRecord_PUT;
// - EACLRecord_SEARCH;
// - EACLRecord_GET;
// - EACLRecord_GETRANGE;
// - EACLRecord_GETRANGEHASH;
// - EACLRecord_DELETE.
func (s RecordWrapper) OperationType() (res OperationType) {
res = OpTypeUnknown
if s.record != nil {
switch s.record.GetOperation() {
case acl.EACLRecord_HEAD:
res = OpTypeHead
case acl.EACLRecord_PUT:
res = OpTypePut
case acl.EACLRecord_SEARCH:
res = OpTypeSearch
case acl.EACLRecord_GET:
res = OpTypeGet
case acl.EACLRecord_GETRANGE:
res = OpTypeRange
case acl.EACLRecord_GETRANGEHASH:
res = OpTypeRangeHash
case acl.EACLRecord_DELETE:
res = OpTypeDelete
}
}
return
}
// SetOperationType sets the operation type of the record.
//
// If record is not initialized, nothing changes.
//
// Operation is set to EACLRecord_OPERATION_UNKNOWN if argument is not one of:
// - OpTypeHead;
// - OpTypePut;
// - OpTypeSearch;
// - OpTypeGet;
// - OpTypeRange;
// - OpTypeRangeHash;
// - OpTypeDelete.
func (s RecordWrapper) SetOperationType(v OperationType) {
if s.record != nil {
switch v {
case OpTypeHead:
s.record.SetOperation(acl.EACLRecord_HEAD)
case OpTypePut:
s.record.SetOperation(acl.EACLRecord_PUT)
case OpTypeSearch:
s.record.SetOperation(acl.EACLRecord_SEARCH)
case OpTypeGet:
s.record.SetOperation(acl.EACLRecord_GET)
case OpTypeRange:
s.record.SetOperation(acl.EACLRecord_GETRANGE)
case OpTypeRangeHash:
s.record.SetOperation(acl.EACLRecord_GETRANGEHASH)
case OpTypeDelete:
s.record.SetOperation(acl.EACLRecord_DELETE)
default:
s.record.SetOperation(acl.EACLRecord_OPERATION_UNKNOWN)
}
}
}
// Action returns the action of the record.
//
// If the record is not initialized, ActionUnknown is returned.
//
// Returns ActionUnknown if Action is not one of:
// - EACLRecord_Deny;
// - EACLRecord_Allow.
func (s RecordWrapper) Action() (res Action) {
res = ActionUnknown
if s.record != nil {
switch s.record.GetAction() {
case acl.EACLRecord_Deny:
res = ActionDeny
case acl.EACLRecord_Allow:
res = ActionAllow
}
}
return
}
// SetAction sets the action of the record.
//
// If record is not initialized, nothing changes.
//
// Action is set to EACLRecord_ActionUnknown if argument is not one of:
// - ActionDeny;
// - ActionAllow.
func (s RecordWrapper) SetAction(v Action) {
if s.record != nil {
switch v {
case ActionDeny:
s.record.SetAction(acl.EACLRecord_Deny)
case ActionAllow:
s.record.SetAction(acl.EACLRecord_Allow)
default:
s.record.SetAction(acl.EACLRecord_ActionUnknown)
}
}
}
// HeaderFilters returns the header filter list of the record.
//
// If the record is not initialized, nil is returned.
func (s RecordWrapper) HeaderFilters() []HeaderFilter {
if s.record == nil {
return nil
}
filters := s.record.GetFilters()
res := make([]HeaderFilter, 0, len(filters))
for i := range filters {
res = append(res, WrapFilterInfo(filters[i]))
}
return res
}
// SetHeaderFilters sets the header filter list of the record.
//
// Ignores nil elements of argument.
// If record is not initialized, nothing changes.
func (s RecordWrapper) SetHeaderFilters(v []HeaderFilter) {
if s.record == nil {
return
}
filters := make([]*acl.EACLRecord_FilterInfo, 0, len(v))
for i := range v {
if v[i] == nil {
continue
}
w := WrapFilterInfo(nil)
w.SetMatchType(v[i].MatchType())
w.SetHeaderType(v[i].HeaderType())
w.SetName(v[i].Name())
w.SetValue(v[i].Value())
filters = append(filters, w.filter)
}
s.record.SetFilters(filters)
}
// TargetList returns the target list of the record.
//
// If the record is not initialized, nil is returned.
func (s RecordWrapper) TargetList() []Target {
if s.record == nil {
return nil
}
targets := s.record.GetTargets()
res := make([]Target, 0, len(targets))
for i := range targets {
res = append(res, WrapTarget(targets[i]))
}
return res
}
// SetTargetList sets the target list of the record.
//
// Ignores nil elements of argument.
// If record is not initialized, nothing changes.
func (s RecordWrapper) SetTargetList(v []Target) {
if s.record == nil {
return
}
targets := make([]*acl.EACLRecord_TargetInfo, 0, len(v))
for i := range v {
if v[i] == nil {
continue
}
w := WrapTarget(nil)
w.SetGroup(v[i].Group())
w.SetKeyList(v[i].KeyList())
targets = append(targets, w.target)
}
s.record.SetTargets(targets)
}
// Records returns the record list of the table.
//
// If the table is not initialized, nil is returned.
func (s TableWrapper) Records() []Record {
if s.table == nil {
return nil
}
records := s.table.GetRecords()
res := make([]Record, 0, len(records))
for i := range records {
res = append(res, WrapRecord(records[i]))
}
return res
}
// SetRecords sets the record list of the table.
//
// Ignores nil elements of argument.
// If table is not initialized, nothing changes.
func (s TableWrapper) SetRecords(v []Record) {
if s.table == nil {
return
}
records := make([]*acl.EACLRecord, 0, len(v))
for i := range v {
if v[i] == nil {
continue
}
w := WrapRecord(nil)
w.SetOperationType(v[i].OperationType())
w.SetAction(v[i].Action())
w.SetHeaderFilters(v[i].HeaderFilters())
w.SetTargetList(v[i].TargetList())
records = append(records, w.record)
}
s.table.SetRecords(records)
}

View file

@ -1,136 +0,0 @@
package eacl
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestEACLFilterWrapper(t *testing.T) {
s := WrapFilterInfo(nil)
mt := StringEqual
s.SetMatchType(mt)
require.Equal(t, mt, s.MatchType())
ht := HdrTypeObjUsr
s.SetHeaderType(ht)
require.Equal(t, ht, s.HeaderType())
n := "name"
s.SetName(n)
require.Equal(t, n, s.Name())
v := "value"
s.SetValue(v)
require.Equal(t, v, s.Value())
}
func TestEACLTargetWrapper(t *testing.T) {
s := WrapTarget(nil)
group := Group(3)
s.SetGroup(group)
require.Equal(t, group, s.Group())
keys := [][]byte{
{1, 2, 3},
{4, 5, 6},
}
s.SetKeyList(keys)
require.Equal(t, keys, s.KeyList())
}
func TestEACLRecordWrapper(t *testing.T) {
s := WrapRecord(nil)
action := ActionAllow
s.SetAction(action)
require.Equal(t, action, s.Action())
opType := OperationType(5)
s.SetOperationType(opType)
require.Equal(t, opType, s.OperationType())
f1Name := "name1"
f1 := WrapFilterInfo(nil)
f1.SetName(f1Name)
f2Name := "name2"
f2 := WrapFilterInfo(nil)
f2.SetName(f2Name)
s.SetHeaderFilters([]HeaderFilter{f1, f2})
filters := s.HeaderFilters()
require.Len(t, filters, 2)
require.Equal(t, f1Name, filters[0].Name())
require.Equal(t, f2Name, filters[1].Name())
group1 := Group(1)
t1 := WrapTarget(nil)
t1.SetGroup(group1)
group2 := Group(2)
t2 := WrapTarget(nil)
t2.SetGroup(group2)
s.SetTargetList([]Target{t1, t2})
targets := s.TargetList()
require.Len(t, targets, 2)
require.Equal(t, group1, targets[0].Group())
require.Equal(t, group2, targets[1].Group())
}
func TestEACLTableWrapper(t *testing.T) {
s := WrapTable(nil)
action1 := Action(1)
r1 := WrapRecord(nil)
r1.SetAction(action1)
action2 := Action(2)
r2 := WrapRecord(nil)
r2.SetAction(action2)
s.SetRecords([]Record{r1, r2})
records := s.Records()
require.Len(t, records, 2)
require.Equal(t, action1, records[0].Action())
require.Equal(t, action2, records[1].Action())
s2, err := UnmarshalTable(MarshalTable(s))
require.NoError(t, err)
records1 := s.Records()
records2 := s2.Records()
require.Len(t, records1, len(records2))
for i := range records1 {
require.Equal(t, records1[i].Action(), records2[i].Action())
require.Equal(t, records1[i].OperationType(), records2[i].OperationType())
targets1 := records1[i].TargetList()
targets2 := records2[i].TargetList()
require.Len(t, targets1, len(targets2))
for j := range targets1 {
require.Equal(t, targets1[j].Group(), targets2[j].Group())
require.Equal(t, targets1[j].KeyList(), targets2[j].KeyList())
}
filters1 := records1[i].HeaderFilters()
filters2 := records2[i].HeaderFilters()
require.Len(t, filters1, len(filters2))
for j := range filters1 {
require.Equal(t, filters1[j].MatchType(), filters2[j].MatchType())
require.Equal(t, filters1[j].HeaderType(), filters2[j].HeaderType())
require.Equal(t, filters1[j].Name(), filters2[j].Name())
require.Equal(t, filters1[j].Value(), filters2[j].Value())
}
}
}

View file

@ -1,57 +0,0 @@
package acl
// SetMatchType is MatchType field setter.
func (m *EACLRecord_FilterInfo) SetMatchType(v EACLRecord_FilterInfo_MatchType) {
m.MatchType = v
}
// SetHeader is a Header field setter.
func (m *EACLRecord_FilterInfo) SetHeader(v EACLRecord_FilterInfo_Header) {
m.Header = v
}
// SetHeaderName is a HeaderName field setter.
func (m *EACLRecord_FilterInfo) SetHeaderName(v string) {
m.HeaderName = v
}
// SetHeaderVal is a HeaderVal field setter.
func (m *EACLRecord_FilterInfo) SetHeaderVal(v string) {
m.HeaderVal = v
}
// SetTarget is a Target field setter.
func (m *EACLRecord_TargetInfo) SetTarget(v Target) {
m.Target = v
}
// SetKeyList is a KeyList field setter.
func (m *EACLRecord_TargetInfo) SetKeyList(v [][]byte) {
m.KeyList = v
}
// SetOperation is an Operation field setter.
func (m *EACLRecord) SetOperation(v EACLRecord_Operation) {
m.Operation = v
}
// SetAction is an Action field setter.
func (m *EACLRecord) SetAction(v EACLRecord_Action) {
m.Action = v
}
// SetFilters is a Filters field setter.
func (m *EACLRecord) SetFilters(v []*EACLRecord_FilterInfo) {
m.Filters = v
}
// SetTargets is a Targets field setter.
func (m *EACLRecord) SetTargets(v []*EACLRecord_TargetInfo) {
m.Targets = v
}
// SetRecords is a Records field setter.
func (m *EACLTable) SetRecords(v []*EACLRecord) {
m.Records = v
}
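The setters above allow building the generated acl types directly, without the eacl wrappers. A minimal sketch, not part of the removed sources, written as if inside the acl package and using only the setters and enum values referenced elsewhere in this commit; the header name and value are hypothetical:
// illustrative sketch: one allow-GET record with a single request-header filter
f := new(EACLRecord_FilterInfo)
f.SetHeader(EACLRecord_FilterInfo_Request)
f.SetMatchType(EACLRecord_FilterInfo_StringEqual)
f.SetHeaderName("X-Example-Header") // hypothetical header name
f.SetHeaderVal("value")             // hypothetical header value

r := new(EACLRecord)
r.SetOperation(EACLRecord_GET)
r.SetAction(EACLRecord_Allow)
r.SetFilters([]*EACLRecord_FilterInfo{f})

tbl := new(EACLTable)
tbl.SetRecords([]*EACLRecord{r})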

File diff suppressed because it is too large

View file

@ -1,106 +0,0 @@
syntax = "proto3";
package acl;
option go_package = "github.com/nspcc-dev/neofs-api-go/acl";
option csharp_namespace = "NeoFS.API.Acl";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Target of the access control rule in access control list.
enum Target {
// Unknown target, default value.
Unknown = 0;
// User target rule is applied if sender is the owner of the container.
User = 1;
// System target rule is applied if sender is the storage node within the
// container or inner ring node.
System = 2;
// Others target rule is applied if sender is not user or system target.
Others = 3;
// PubKey target rule is applied if sender has public key provided in
// extended ACL.
PubKey = 4;
}
// EACLRecord groups information about extended ACL rule.
message EACLRecord {
// Operation is an enumeration of operation types.
enum Operation {
OPERATION_UNKNOWN = 0;
GET = 1;
HEAD = 2;
PUT = 3;
DELETE = 4;
SEARCH = 5;
GETRANGE = 6;
GETRANGEHASH = 7;
}
// Operation carries type of operation.
Operation operation = 1 [(gogoproto.customname) = "Operation", json_name="Operation"];
// Action is an enumeration of EACL actions.
enum Action {
ActionUnknown = 0;
Allow = 1;
Deny = 2;
}
// Action carries ACL target action.
Action action = 2 [(gogoproto.customname) = "Action", json_name="Action"];
// FilterInfo groups information about filter.
message FilterInfo {
// Header is an enumeration of filtering header types.
enum Header {
HeaderUnknown = 0;
Request = 1;
ObjectSystem = 2;
ObjectUser = 3;
}
// Header carries type of header.
Header header = 1 [(gogoproto.customname) = "Header", json_name="HeaderType"];
// MatchType is an enumeration of match types.
enum MatchType {
MatchUnknown = 0;
StringEqual = 1;
StringNotEqual = 2;
}
// MatchType carries type of match.
MatchType matchType = 2 [(gogoproto.customname) = "MatchType", json_name="MatchType"];
// HeaderName carries name of filtering header.
string HeaderName = 3 [json_name="Name"];
// HeaderVal carries value of filtering header.
string HeaderVal = 4 [json_name="Value"];
}
// Filters carries set of filters.
repeated FilterInfo Filters = 3 [json_name="Filters"];
// TargetInfo groups information about extended ACL target.
message TargetInfo {
// Target carries target of ACL rule.
acl.Target Target = 1 [json_name="Role"];
// KeyList carries public keys of ACL target.
repeated bytes KeyList = 2 [json_name="Keys"];
}
// Targets carries information about extended ACL target list.
repeated TargetInfo Targets = 4 [json_name="Targets"];
}
// EACLTable carries the information about extended ACL rules.
message EACLTable {
// Records carries list of extended ACL rule records.
repeated EACLRecord Records = 1 [json_name="Records"];
}

View file

@ -1,8 +0,0 @@
package bootstrap
import (
"github.com/nspcc-dev/neofs-api-go/service"
)
// NodeType type alias.
type NodeType = service.NodeRole

View file

@ -1,628 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: bootstrap/service.proto
package bootstrap
import (
context "context"
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
service "github.com/nspcc-dev/neofs-api-go/service"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Node state
type Request_State int32
const (
// used by default
Request_Unknown Request_State = 0
// used to inform that the node is online
Request_Online Request_State = 1
// used to inform that the node is offline
Request_Offline Request_State = 2
)
var Request_State_name = map[int32]string{
0: "Unknown",
1: "Online",
2: "Offline",
}
var Request_State_value = map[string]int32{
"Unknown": 0,
"Online": 1,
"Offline": 2,
}
func (x Request_State) String() string {
return proto.EnumName(Request_State_name, int32(x))
}
func (Request_State) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_21bce759c9d8eb63, []int{0, 0}
}
type Request struct {
// Type is NodeType; it can be InnerRingNode (type=1) or StorageNode (type=2)
Type NodeType `protobuf:"varint,1,opt,name=type,proto3,customtype=NodeType" json:"type"`
// Info contains information about node
Info NodeInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info"`
// State contains node status
State Request_State `protobuf:"varint,3,opt,name=state,proto3,enum=bootstrap.Request_State" json:"state,omitempty"`
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader `protobuf:"bytes,98,opt,name=Meta,proto3,embedded=Meta" json:"Meta"`
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader `protobuf:"bytes,99,opt,name=Verify,proto3,embedded=Verify" json:"Verify"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_21bce759c9d8eb63, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(m, src)
}
func (m *Request) XXX_Size() int {
return m.Size()
}
func (m *Request) XXX_DiscardUnknown() {
xxx_messageInfo_Request.DiscardUnknown(m)
}
var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetInfo() NodeInfo {
if m != nil {
return m.Info
}
return NodeInfo{}
}
func (m *Request) GetState() Request_State {
if m != nil {
return m.State
}
return Request_Unknown
}
func init() {
proto.RegisterEnum("bootstrap.Request_State", Request_State_name, Request_State_value)
proto.RegisterType((*Request)(nil), "bootstrap.Request")
}
func init() { proto.RegisterFile("bootstrap/service.proto", fileDescriptor_21bce759c9d8eb63) }
var fileDescriptor_21bce759c9d8eb63 = []byte{
// 420 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x51, 0x4d, 0x8b, 0xd3, 0x40,
0x18, 0xee, 0xd4, 0xb4, 0xdd, 0x9d, 0x05, 0x59, 0x66, 0x57, 0x0c, 0x3d, 0xa4, 0xa5, 0xa7, 0x82,
0x66, 0x02, 0xdd, 0x8b, 0x47, 0x0d, 0x22, 0xee, 0x61, 0x3f, 0x48, 0xfd, 0x00, 0x6f, 0x93, 0xf4,
0x4d, 0x1d, 0x74, 0xe7, 0x1d, 0x33, 0xd3, 0x4a, 0xff, 0x89, 0xbf, 0xc1, 0xff, 0x21, 0xec, 0x71,
0x8f, 0xe2, 0xa1, 0x48, 0xfc, 0x23, 0x32, 0xd3, 0xb4, 0x16, 0xf7, 0x94, 0xbc, 0xcf, 0xd7, 0xbc,
0x1f, 0xf4, 0x71, 0x8e, 0x68, 0x8d, 0xad, 0x84, 0x4e, 0x0c, 0x54, 0x4b, 0x59, 0x00, 0xd7, 0x15,
0x5a, 0x64, 0x87, 0x3b, 0xa2, 0xcf, 0x1a, 0x26, 0xb9, 0x01, 0x2b, 0x36, 0x74, 0xff, 0x74, 0x8b,
0x2d, 0xa1, 0x92, 0xe5, 0xaa, 0x41, 0x1f, 0xfd, 0x4b, 0xb3, 0x2b, 0x0d, 0xa6, 0x81, 0xe3, 0xb9,
0xb4, 0x1f, 0x17, 0x39, 0x2f, 0xf0, 0x26, 0x99, 0xe3, 0x1c, 0x13, 0x0f, 0xe7, 0x8b, 0xd2, 0x57,
0xbe, 0xf0, 0x7f, 0x1b, 0xf9, 0xe8, 0x47, 0x9b, 0xf6, 0x32, 0xf8, 0xb2, 0x00, 0x63, 0xd9, 0x53,
0x1a, 0xb8, 0xa4, 0x90, 0x0c, 0xc9, 0xb8, 0x93, 0x86, 0xb7, 0xeb, 0x41, 0xeb, 0xd7, 0x7a, 0x70,
0x70, 0x89, 0x33, 0x78, 0xb3, 0xd2, 0x50, 0xaf, 0x07, 0x81, 0xfb, 0x66, 0x5e, 0xc5, 0x62, 0x1a,
0x48, 0x55, 0x62, 0xd8, 0x1e, 0x92, 0xf1, 0xd1, 0xe4, 0x84, 0xef, 0xda, 0xe1, 0xce, 0x70, 0xae,
0x4a, 0x4c, 0x03, 0x17, 0x91, 0x79, 0x19, 0xe3, 0xb4, 0x63, 0xac, 0xb0, 0x10, 0x3e, 0x18, 0x92,
0xf1, 0xc3, 0x49, 0xb8, 0xa7, 0x6f, 0xde, 0xe7, 0x53, 0xc7, 0x67, 0x1b, 0x19, 0x7b, 0x46, 0x83,
0x0b, 0xb0, 0x22, 0xcc, 0x7d, 0x7c, 0x9f, 0x6f, 0x37, 0xd6, 0x88, 0x1d, 0xf7, 0x1a, 0xc4, 0x0c,
0xaa, 0xf4, 0xc0, 0xbd, 0x72, 0xb7, 0x1e, 0x90, 0xcc, 0x3b, 0xd8, 0x4b, 0xda, 0x7d, 0xe7, 0x17,
0x15, 0x16, 0xde, 0x3b, 0xfa, 0xdf, 0xeb, 0x59, 0x59, 0x08, 0x2b, 0x51, 0xdd, 0xcb, 0x68, 0xbc,
0xa3, 0x98, 0x76, 0x7c, 0x3f, 0xec, 0x88, 0xf6, 0xde, 0xaa, 0x4f, 0x0a, 0xbf, 0xaa, 0xe3, 0x16,
0xa3, 0xb4, 0x7b, 0xa5, 0x3e, 0x4b, 0x05, 0xc7, 0xc4, 0x11, 0x57, 0x65, 0xe9, 0x8b, 0xf6, 0xe4,
0x39, 0x3d, 0x4c, 0xb7, 0x03, 0xb1, 0x33, 0xda, 0xbb, 0xae, 0xb0, 0x00, 0x63, 0x18, 0xbb, 0x3f,
0x67, 0xff, 0x74, 0x0f, 0x9b, 0xea, 0x0a, 0xc4, 0xec, 0x42, 0xe8, 0xf4, 0xfd, 0x6d, 0x1d, 0x91,
0xbb, 0x3a, 0x22, 0x3f, 0xeb, 0x88, 0xfc, 0xae, 0x23, 0xf2, 0xed, 0x4f, 0xd4, 0xfa, 0xf0, 0x64,
0xef, 0x9c, 0xca, 0xe8, 0xa2, 0x88, 0x67, 0xb0, 0x4c, 0x14, 0x60, 0x69, 0x62, 0xa1, 0x65, 0x3c,
0xc7, 0x64, 0x17, 0xf6, 0xbd, 0x7d, 0x72, 0x09, 0xf8, 0x6a, 0xca, 0x5f, 0x5c, 0x9f, 0xf3, 0x5d,
0x37, 0x79, 0xd7, 0x5f, 0xfa, 0xec, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0xc6, 0xb6, 0xb6,
0x7f, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// BootstrapClient is the client API for Bootstrap service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BootstrapClient interface {
// Process is a method that allows a node to register in the network and receive the actual netmap
Process(ctx context.Context, in *Request, opts ...grpc.CallOption) (*SpreadMap, error)
}
type bootstrapClient struct {
cc *grpc.ClientConn
}
func NewBootstrapClient(cc *grpc.ClientConn) BootstrapClient {
return &bootstrapClient{cc}
}
func (c *bootstrapClient) Process(ctx context.Context, in *Request, opts ...grpc.CallOption) (*SpreadMap, error) {
out := new(SpreadMap)
err := c.cc.Invoke(ctx, "/bootstrap.Bootstrap/Process", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// BootstrapServer is the server API for Bootstrap service.
type BootstrapServer interface {
// Process is a method that allows a node to register in the network and receive the actual netmap
Process(context.Context, *Request) (*SpreadMap, error)
}
// UnimplementedBootstrapServer can be embedded to have forward compatible implementations.
type UnimplementedBootstrapServer struct {
}
func (*UnimplementedBootstrapServer) Process(ctx context.Context, req *Request) (*SpreadMap, error) {
return nil, status.Errorf(codes.Unimplemented, "method Process not implemented")
}
func RegisterBootstrapServer(s *grpc.Server, srv BootstrapServer) {
s.RegisterService(&_Bootstrap_serviceDesc, srv)
}
func _Bootstrap_Process_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BootstrapServer).Process(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/bootstrap.Bootstrap/Process",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BootstrapServer).Process(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Bootstrap_serviceDesc = grpc.ServiceDesc{
ServiceName: "bootstrap.Bootstrap",
HandlerType: (*BootstrapServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Process",
Handler: _Bootstrap_Process_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "bootstrap/service.proto",
}
func (m *Request) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
{
size, err := m.RequestVerificationHeader.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6
i--
dAtA[i] = 0x9a
{
size, err := m.RequestMetaHeader.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6
i--
dAtA[i] = 0x92
if m.State != 0 {
i = encodeVarintService(dAtA, i, uint64(m.State))
i--
dAtA[i] = 0x18
}
{
size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if m.Type != 0 {
i = encodeVarintService(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintService(dAtA []byte, offset int, v uint64) int {
offset -= sovService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Request) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Type != 0 {
n += 1 + sovService(uint64(m.Type))
}
l = m.Info.Size()
n += 1 + l + sovService(uint64(l))
if m.State != 0 {
n += 1 + sovService(uint64(m.State))
}
l = m.RequestMetaHeader.Size()
n += 2 + l + sovService(uint64(l))
l = m.RequestVerificationHeader.Size()
n += 2 + l + sovService(uint64(l))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozService(x uint64) (n int) {
return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Request) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Request: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= NodeType(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
}
m.State = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.State |= Request_State(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 98:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestMetaHeader", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.RequestMetaHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 99:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestVerificationHeader", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.RequestVerificationHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipService(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group")
)

View file

@ -1,42 +0,0 @@
syntax = "proto3";
package bootstrap;
option go_package = "github.com/nspcc-dev/neofs-api-go/bootstrap";
option csharp_namespace = "NeoFS.API.Bootstrap";
import "service/meta.proto";
import "service/verify.proto";
import "bootstrap/types.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Bootstrap service allows neofs-node to connect to the network. A node should
// perform at least one bootstrap request per epoch to stay in the network
// for the next epoch.
service Bootstrap {
// Process is a method that allows a node to register in the network and receive the actual netmap
rpc Process(Request) returns (bootstrap.SpreadMap);
}
message Request {
// Node state
enum State {
// used by default
Unknown = 0;
// used to inform that the node is online
Online = 1;
// used to inform that the node is offline
Offline = 2;
}
// Type is NodeType; it can be InnerRingNode (type=1) or StorageNode (type=2)
int32 type = 1 [(gogoproto.customname) = "Type" , (gogoproto.nullable) = false, (gogoproto.customtype) = "NodeType"];
// Info contains information about node
bootstrap.NodeInfo info = 2 [(gogoproto.nullable) = false];
// State contains node status
State state = 3;
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
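From the client side, a node fills a Request, signs it with the service helpers and calls Bootstrap.Process over gRPC. A minimal sketch, not part of the removed sources, written as if inside the bootstrap package with the service package imported; the gRPC connection conn, the context ctx and the private key sk are assumed to be prepared by the caller, and the address is a hypothetical example:
// illustrative sketch, assuming the removed v1 bootstrap and service packages
req := new(Request)
req.SetType(2) // StorageNode, per the Type comment above
req.SetState(Request_Online)
req.SetInfo(NodeInfo{Address: "/ip4/127.0.0.1/tcp/8080"}) // hypothetical multi-address

if err := service.SignRequestData(sk, req); err != nil {
	// handle signing error
}

spreadMap, err := NewBootstrapClient(conn).Process(ctx, req)
if err != nil {
	// handle RPC error
}
_ = spreadMap.GetEpoch() // current netmap epoch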

View file

@ -1,46 +0,0 @@
package bootstrap
import (
"io"
"github.com/nspcc-dev/neofs-api-go/service"
)
// SignedData returns payload bytes of the request.
func (m Request) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m Request) SignedDataSize() (sz int) {
sz += m.GetType().Size()
sz += m.GetState().Size()
info := m.GetInfo()
sz += info.Size()
return
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the Request size is insufficient, io.ErrUnexpectedEOF is returned.
func (m Request) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetType().Bytes())
off += copy(p[off:], m.GetState().Bytes())
info := m.GetInfo()
// FIXME: implement and use stable functions
n, err := info.MarshalTo(p[off:])
off += n
return off, err
}

View file

@ -1,82 +0,0 @@
package bootstrap
import (
"testing"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestRequestSign(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
}
items := []struct {
constructor func() sigType
payloadCorrupt []func(sigType)
}{
{ // Request
constructor: func() sigType {
return new(Request)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*Request)
req.SetType(req.GetType() + 1)
},
func(s sigType) {
req := s.(*Request)
req.SetState(req.GetState() + 1)
},
func(s sigType) {
req := s.(*Request)
info := req.GetInfo()
info.Address += "1"
req.SetInfo(info)
},
},
},
}
for _, item := range items {
{ // token corruptions
v := item.constructor()
token := new(service.Token)
v.SetToken(token)
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
corruption(v)
require.Error(t, service.VerifyRequestData(v))
}
}
}
}

View file

@ -1,137 +0,0 @@
package bootstrap
import (
"bytes"
"encoding/binary"
"encoding/hex"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"github.com/nspcc-dev/neofs-api-go/object"
)
type (
// NodeStatus is a bitwise status field of the node.
NodeStatus uint64
)
const (
storageFullMask = 0x1
optionCapacity = "/Capacity:"
optionPrice = "/Price:"
)
var (
_ proto.Message = (*NodeInfo)(nil)
_ proto.Message = (*SpreadMap)(nil)
)
var requestEndianness = binary.BigEndian
// Equals checks whether two NodeInfo has same address.
func (m NodeInfo) Equals(n1 NodeInfo) bool {
return m.Address == n1.Address && bytes.Equal(m.PubKey, n1.PubKey)
}
// Full checks if the node has enough space for storing users' objects.
func (n NodeStatus) Full() bool {
return n&storageFullMask > 0
}
// SetFull changes the state of the node to indicate whether it has enough space for storing users' objects.
// If value is true, there is not enough space.
func (n *NodeStatus) SetFull(value bool) {
switch value {
case true:
*n |= NodeStatus(storageFullMask)
case false:
*n &= NodeStatus(^uint64(storageFullMask))
}
}
// Price returns the price in 1e-8*GAS/Megabyte per month.
// The user sets the price in GAS/Terabyte per month.
func (m NodeInfo) Price() uint64 {
for i := range m.Options {
if strings.HasPrefix(m.Options[i], optionPrice) {
n, err := strconv.ParseFloat(m.Options[i][len(optionPrice):], 64)
if err != nil {
return 0
}
return uint64(n*1e8) / uint64(object.UnitsMB) // UnitsMB == megabytes in 1 terabyte
}
}
return 0
}
// Capacity returns the node's capacity as reported by the user.
func (m NodeInfo) Capacity() uint64 {
for i := range m.Options {
if strings.HasPrefix(m.Options[i], optionCapacity) {
n, err := strconv.ParseUint(m.Options[i][len(optionCapacity):], 10, 64)
if err != nil {
return 0
}
return n
}
}
return 0
}
// String returns string representation of NodeInfo.
func (m NodeInfo) String() string {
return "(NodeInfo)<" +
"Address:" + m.Address +
", " +
"PublicKey:" + hex.EncodeToString(m.PubKey) +
", " +
"Options: [" + strings.Join(m.Options, ",") + "]>"
}
// String returns string representation of SpreadMap.
func (m SpreadMap) String() string {
result := make([]string, 0, len(m.NetMap))
for i := range m.NetMap {
result = append(result, m.NetMap[i].String())
}
return "(SpreadMap)<" +
"Epoch: " + strconv.FormatUint(m.Epoch, 10) +
", " +
"Netmap: [" + strings.Join(result, ",") + "]>"
}
// GetType is a Type field getter.
func (m Request) GetType() NodeType {
return m.Type
}
// SetType is a Type field setter.
func (m *Request) SetType(t NodeType) {
m.Type = t
}
// SetState is a State field setter.
func (m *Request) SetState(state Request_State) {
m.State = state
}
// SetInfo is an Info field setter.
func (m *Request) SetInfo(info NodeInfo) {
m.Info = info
}
// Size returns the size necessary for a binary representation of the state.
func (x Request_State) Size() int {
return 4
}
// Bytes returns a binary representation of the state.
func (x Request_State) Bytes() []byte {
data := make([]byte, x.Size())
requestEndianness.PutUint32(data, uint32(x))
return data
}
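Price and Capacity above are parsed from plain option strings attached to NodeInfo. A minimal sketch of how a node advertises them, not part of the removed sources and written as if inside the bootstrap package; the values are illustrative, and the exact result of Price depends on object.UnitsMB:
// illustrative sketch, assuming the removed v1 bootstrap package
info := NodeInfo{
	Address: "/ip4/127.0.0.1/tcp/8080",
	Options: []string{
		"/Capacity:100", // 100 TB reported by the operator
		"/Price:0.5",    // 0.5 GAS per terabyte per month
	},
}

_ = info.Capacity() // 100
_ = info.Price()    // uint64(0.5*1e8) / uint64(object.UnitsMB): the per-megabyte price in 1e-8 GAS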

View file

@ -1,705 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: bootstrap/types.proto
package bootstrap
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SpreadMap struct {
// Epoch is current epoch for netmap
Epoch uint64 `protobuf:"varint,1,opt,name=Epoch,proto3" json:"Epoch,omitempty"`
// NetMap is a set of NodeInfos
NetMap []NodeInfo `protobuf:"bytes,2,rep,name=NetMap,proto3" json:"NetMap"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SpreadMap) Reset() { *m = SpreadMap{} }
func (*SpreadMap) ProtoMessage() {}
func (*SpreadMap) Descriptor() ([]byte, []int) {
return fileDescriptor_423083266369adee, []int{0}
}
func (m *SpreadMap) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SpreadMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SpreadMap) XXX_Merge(src proto.Message) {
xxx_messageInfo_SpreadMap.Merge(m, src)
}
func (m *SpreadMap) XXX_Size() int {
return m.Size()
}
func (m *SpreadMap) XXX_DiscardUnknown() {
xxx_messageInfo_SpreadMap.DiscardUnknown(m)
}
var xxx_messageInfo_SpreadMap proto.InternalMessageInfo
func (m *SpreadMap) GetEpoch() uint64 {
if m != nil {
return m.Epoch
}
return 0
}
func (m *SpreadMap) GetNetMap() []NodeInfo {
if m != nil {
return m.NetMap
}
return nil
}
type NodeInfo struct {
// Address is a node [multi-address](https://github.com/multiformats/multiaddr)
Address string `protobuf:"bytes,1,opt,name=Address,proto3" json:"address"`
// PubKey is a compressed public key representation in bytes
PubKey []byte `protobuf:"bytes,2,opt,name=PubKey,proto3" json:"pubkey,omitempty"`
// Options is a set of optional node information, such as storage capacity, node location, price, etc.
Options []string `protobuf:"bytes,3,rep,name=Options,proto3" json:"options,omitempty"`
// Status is bitmap status of the node
Status NodeStatus `protobuf:"varint,4,opt,name=Status,proto3,customtype=NodeStatus" json:"status"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeInfo) Reset() { *m = NodeInfo{} }
func (*NodeInfo) ProtoMessage() {}
func (*NodeInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_423083266369adee, []int{1}
}
func (m *NodeInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeInfo.Merge(m, src)
}
func (m *NodeInfo) XXX_Size() int {
return m.Size()
}
func (m *NodeInfo) XXX_DiscardUnknown() {
xxx_messageInfo_NodeInfo.DiscardUnknown(m)
}
var xxx_messageInfo_NodeInfo proto.InternalMessageInfo
func (m *NodeInfo) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
func (m *NodeInfo) GetPubKey() []byte {
if m != nil {
return m.PubKey
}
return nil
}
func (m *NodeInfo) GetOptions() []string {
if m != nil {
return m.Options
}
return nil
}
func init() {
proto.RegisterType((*SpreadMap)(nil), "bootstrap.SpreadMap")
proto.RegisterType((*NodeInfo)(nil), "bootstrap.NodeInfo")
}
func init() { proto.RegisterFile("bootstrap/types.proto", fileDescriptor_423083266369adee) }
var fileDescriptor_423083266369adee = []byte{
// 370 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xdf, 0x8a, 0x9b, 0x40,
0x14, 0xc6, 0x9d, 0x24, 0x35, 0x75, 0xd2, 0x8b, 0xd6, 0x24, 0x20, 0xa5, 0xa8, 0x04, 0x0a, 0x42,
0xa3, 0xd2, 0x3f, 0x2f, 0x10, 0xa1, 0x85, 0x50, 0x92, 0x06, 0xd3, 0xab, 0xde, 0x14, 0xff, 0x4c,
0x8c, 0x2c, 0xf1, 0x0c, 0xce, 0xb8, 0xe0, 0xdd, 0x3e, 0xc6, 0x3e, 0xc3, 0x3e, 0xc8, 0x92, 0xcb,
0xbd, 0x0c, 0x7b, 0x21, 0xbb, 0xee, 0x9d, 0x4f, 0xb1, 0x44, 0x63, 0xc8, 0xdd, 0xf9, 0xbe, 0xef,
0x37, 0x33, 0x67, 0xce, 0xc1, 0x63, 0x1f, 0x80, 0x33, 0x9e, 0x7a, 0xd4, 0xe6, 0x39, 0x25, 0xcc,
0xa2, 0x29, 0x70, 0x90, 0xa5, 0xb3, 0xfd, 0xd1, 0x8c, 0x62, 0xbe, 0xcd, 0x7c, 0x2b, 0x80, 0x9d,
0x1d, 0x41, 0x04, 0x76, 0x4d, 0xf8, 0xd9, 0xa6, 0x56, 0xb5, 0xa8, 0xab, 0xe6, 0xe4, 0xe4, 0x2f,
0x96, 0xd6, 0x34, 0x25, 0x5e, 0xb8, 0xf0, 0xa8, 0x3c, 0xc2, 0x6f, 0x7e, 0x52, 0x08, 0xb6, 0x0a,
0xd2, 0x91, 0xd1, 0x73, 0x1b, 0x21, 0x7f, 0xc5, 0xe2, 0x92, 0xf0, 0x85, 0x47, 0x95, 0x8e, 0xde,
0x35, 0x06, 0xdf, 0x86, 0xd6, 0xf9, 0x35, 0x6b, 0x09, 0x21, 0x99, 0x27, 0x1b, 0x70, 0x7a, 0xfb,
0x42, 0x13, 0xdc, 0x13, 0x38, 0xb9, 0x47, 0xf8, 0x6d, 0x1b, 0xc9, 0x9f, 0x71, 0x7f, 0x16, 0x86,
0x29, 0x61, 0xac, 0xbe, 0x57, 0x72, 0x06, 0x55, 0xa1, 0xf5, 0xbd, 0xc6, 0x72, 0xdb, 0x4c, 0x9e,
0x62, 0x71, 0x95, 0xf9, 0xbf, 0x49, 0xae, 0x74, 0x74, 0x64, 0xbc, 0x73, 0x46, 0x55, 0xa1, 0xbd,
0xa7, 0x99, 0x7f, 0x45, 0xf2, 0x29, 0xec, 0x62, 0x4e, 0x76, 0x94, 0xe7, 0xee, 0x89, 0x91, 0x6d,
0xdc, 0xff, 0x43, 0x79, 0x0c, 0x09, 0x53, 0xba, 0x7a, 0xd7, 0x90, 0x9c, 0x71, 0x55, 0x68, 0x1f,
0xa0, 0xb1, 0x2e, 0xf8, 0x96, 0x92, 0x7f, 0x60, 0x71, 0xcd, 0x3d, 0x9e, 0x31, 0xa5, 0x77, 0xfc,
0x9c, 0xf3, 0xe9, 0xd8, 0xf0, 0x63, 0xa1, 0xe1, 0x63, 0x9f, 0x4d, 0x52, 0x15, 0x9a, 0xc8, 0xea,
0xca, 0x3d, 0xb1, 0xce, 0xff, 0xc3, 0xb3, 0x2a, 0xdc, 0x94, 0xaa, 0xb0, 0x2f, 0x55, 0xf4, 0x50,
0xaa, 0xe8, 0x50, 0xaa, 0xe8, 0xa9, 0x54, 0xd1, 0xed, 0x8b, 0x2a, 0xfc, 0xfb, 0x72, 0x31, 0xeb,
0x84, 0xd1, 0x20, 0x30, 0x43, 0x72, 0x6d, 0x27, 0x04, 0x36, 0xcc, 0xf4, 0x68, 0x6c, 0x46, 0x60,
0x9f, 0x47, 0x75, 0xd7, 0x19, 0x2e, 0x09, 0xfc, 0x5a, 0x5b, 0xb3, 0xd5, 0xdc, 0x72, 0x5a, 0xd7,
0x17, 0xeb, 0x35, 0x7c, 0x7f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xef, 0x9f, 0x22, 0x25, 0xd9, 0x01,
0x00, 0x00,
}
func (m *SpreadMap) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SpreadMap) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SpreadMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.NetMap) > 0 {
for iNdEx := len(m.NetMap) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.NetMap[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if m.Epoch != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Epoch))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *NodeInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NodeInfo) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *NodeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Status != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Status))
i--
dAtA[i] = 0x20
}
if len(m.Options) > 0 {
for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Options[iNdEx])
copy(dAtA[i:], m.Options[iNdEx])
i = encodeVarintTypes(dAtA, i, uint64(len(m.Options[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
if len(m.PubKey) > 0 {
i -= len(m.PubKey)
copy(dAtA[i:], m.PubKey)
i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKey)))
i--
dAtA[i] = 0x12
}
if len(m.Address) > 0 {
i -= len(m.Address)
copy(dAtA[i:], m.Address)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Address)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *SpreadMap) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Epoch != 0 {
n += 1 + sovTypes(uint64(m.Epoch))
}
if len(m.NetMap) > 0 {
for _, e := range m.NetMap {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *NodeInfo) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Address)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.PubKey)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if len(m.Options) > 0 {
for _, s := range m.Options {
l = len(s)
n += 1 + l + sovTypes(uint64(l))
}
}
if m.Status != 0 {
n += 1 + sovTypes(uint64(m.Status))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTypes(x uint64) (n int) {
return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *SpreadMap) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SpreadMap: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SpreadMap: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType)
}
m.Epoch = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Epoch |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NetMap", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NetMap = append(m.NetMap, NodeInfo{})
if err := m.NetMap[len(m.NetMap)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *NodeInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...)
if m.PubKey == nil {
m.PubKey = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
m.Status = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Status |= NodeStatus(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTypes
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTypes
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTypes
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
)

View file

@ -1,29 +0,0 @@
syntax = "proto3";
package bootstrap;
option go_package = "github.com/nspcc-dev/neofs-api-go/bootstrap";
option csharp_namespace = "NeoFS.API.Bootstrap";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
option (gogoproto.stringer_all) = false;
option (gogoproto.goproto_stringer_all) = false;
message SpreadMap {
// Epoch is the current epoch for the netmap
uint64 Epoch = 1;
// NetMap is a set of NodeInfos
repeated NodeInfo NetMap = 2 [(gogoproto.nullable) = false];
}
message NodeInfo {
// Address is a node [multi-address](https://github.com/multiformats/multiaddr)
string Address = 1 [(gogoproto.jsontag) = "address"];
// PubKey is a compressed public key representation in bytes
bytes PubKey = 2 [(gogoproto.jsontag) = "pubkey,omitempty"];
// Options is a set of optional node information, such as storage capacity, node location, price, etc.
repeated string Options = 3 [(gogoproto.jsontag) = "options,omitempty"];
// Status is a bitmap status of the node
uint64 Status = 4 [(gogoproto.jsontag) = "status", (gogoproto.nullable) = false, (gogoproto.customtype) = "NodeStatus"];
}
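
For illustration, here is a minimal Go sketch of filling and marshaling these messages with the generated types; the address and option strings are made-up placeholder values, not a prescribed format.

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/bootstrap"
)

func main() {
    node := bootstrap.NodeInfo{
        // multiaddr-style endpoint, value is illustrative only
        Address: "/ip4/127.0.0.1/tcp/8080",
        Options: []string{"/Location:Europe", "/Capacity:100"},
    }

    sm := bootstrap.SpreadMap{
        Epoch:  1,
        NetMap: []bootstrap.NodeInfo{node},
    }

    data, err := sm.Marshal() // generated stable marshaler
    if err != nil {
        panic(err)
    }
    fmt.Printf("spread map of epoch %d takes %d bytes\n", sm.GetEpoch(), len(data))
}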

View file

@ -1,39 +0,0 @@
package bootstrap
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestRequestGettersSetters(t *testing.T) {
t.Run("type", func(t *testing.T) {
rt := NodeType(1)
m := new(Request)
m.SetType(rt)
require.Equal(t, rt, m.GetType())
})
t.Run("state", func(t *testing.T) {
st := Request_State(1)
m := new(Request)
m.SetState(st)
require.Equal(t, st, m.GetState())
})
t.Run("info", func(t *testing.T) {
info := NodeInfo{
Address: "some address",
}
m := new(Request)
m.SetInfo(info)
require.Equal(t, info, m.GetInfo())
})
}

View file

@ -1,30 +0,0 @@
package chain
import (
"crypto/ecdsa"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
// WalletAddress implements NEO address.
type WalletAddress [AddressLength]byte
const (
// AddressLength contains size of address,
// 1 byte of address version + 20 bytes of ScriptHash + 4 bytes of checksum.
AddressLength = 25
)
// KeyToAddress returns NEO address composed from public key.
func KeyToAddress(key *ecdsa.PublicKey) string {
if key == nil {
return ""
}
neoPublicKey := keys.PublicKey{
X: key.X,
Y: key.Y,
}
return neoPublicKey.Address()
}
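
A quick usage sketch with a freshly generated P-256 key (the curve NEO uses); the test file below exercises the same function against fixed vectors.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/chain"
)

func main() {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    // KeyToAddress wraps the public key into a NEO address string.
    fmt.Println(chain.KeyToAddress(&key.PublicKey))
}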

View file

@ -1,41 +0,0 @@
package chain
import (
"encoding/hex"
"testing"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/stretchr/testify/require"
)
type addressTestCase struct {
name string
publicKey string
wallet string
}
func TestKeyToAddress(t *testing.T) {
tests := []addressTestCase{
{
"nil key",
"",
"",
},
{
"correct key",
"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
"NgzuJWWGVEwFGsRrgzj8knswEYRJrTe7sm",
},
}
for i := range tests {
t.Run(tests[i].name, func(t *testing.T) {
data, err := hex.DecodeString(tests[i].publicKey)
require.NoError(t, err)
key := crypto.UnmarshalPublicKey(data)
require.Equal(t, tests[i].wallet, KeyToAddress(key))
})
}
}

View file

@ -1,60 +0,0 @@
package container
import (
"bytes"
"encoding/binary"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/pkg/errors"
)
type (
// CID type alias.
CID = refs.CID
// UUID type alias.
UUID = refs.UUID
// OwnerID type alias.
OwnerID = refs.OwnerID
// MessageID type alias.
MessageID = refs.MessageID
)
const (
// ErrNotFound is raised when container could not be found.
ErrNotFound = internal.Error("could not find container")
)
// PrepareData prepares bytes representation of PutRequest to satisfy SignedRequest interface.
func (m *PutRequest) PrepareData() ([]byte, error) {
var (
err error
buf = new(bytes.Buffer)
capBytes = make([]byte, 8)
aclBytes = make([]byte, 4)
)
binary.BigEndian.PutUint64(capBytes, m.Capacity)
binary.BigEndian.PutUint32(aclBytes, m.BasicACL)
if _, err = buf.Write(m.MessageID.Bytes()); err != nil {
return nil, errors.Wrap(err, "could not write message id")
} else if _, err = buf.Write(capBytes); err != nil {
return nil, errors.Wrap(err, "could not write capacity")
} else if _, err = buf.Write(m.OwnerID.Bytes()); err != nil {
return nil, errors.Wrap(err, "could not write owner id")
} else if data, err := m.Rules.Marshal(); err != nil {
return nil, errors.Wrap(err, "could not marshal placement")
} else if _, err = buf.Write(data); err != nil {
return nil, errors.Wrap(err, "could not write placement")
} else if _, err = buf.Write(aclBytes); err != nil {
return nil, errors.Wrap(err, "could not write basic acl")
}
return buf.Bytes(), nil
}
// PrepareData prepares bytes representation of DeleteRequest to satisfy SignedRequest interface.
func (m *DeleteRequest) PrepareData() ([]byte, error) {
return m.CID.Bytes(), nil
}
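
A minimal sketch of consuming the PrepareData output, for example hashing it before signing; the CID value is a made-up placeholder and the hashing step is illustrative rather than part of the package API.

package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/container"
)

func main() {
    req := new(container.DeleteRequest)
    req.SetCID(container.CID{1, 2, 3}) // hypothetical container id

    data, err := req.PrepareData()
    if err != nil {
        panic(err)
    }
    digest := sha256.Sum256(data)
    fmt.Printf("payload digest: %x\n", digest)
}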

File diff suppressed because it is too large

View file

@ -1,146 +0,0 @@
syntax = "proto3";
package container;
option go_package = "github.com/nspcc-dev/neofs-api-go/container";
option csharp_namespace = "NeoFS.API.Container";
import "service/meta.proto";
import "service/verify.proto";
import "container/types.proto";
import "github.com/nspcc-dev/netmap/selector.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Container service provides an API for manipulating containers.
service Service {
// Put request proposes a container to the inner ring nodes. They will
// accept a new container if the user has enough deposit. All containers
// are accepted via consensus, therefore it is an asynchronous process.
rpc Put(PutRequest) returns (PutResponse);
// Delete removes the container from the inner ring container storage. It
// is also an asynchronous process done via consensus.
rpc Delete(DeleteRequest) returns (DeleteResponse);
// Get returns the container instance
rpc Get(GetRequest) returns (GetResponse);
// List returns all of the user's containers
rpc List(ListRequest) returns (ListResponse);
// SetExtendedACL changes extended ACL rules of the container
rpc SetExtendedACL(SetExtendedACLRequest) returns (SetExtendedACLResponse);
// GetExtendedACL returns extended ACL rules of the container
rpc GetExtendedACL(GetExtendedACLRequest) returns (GetExtendedACLResponse);
}
message PutRequest {
// MessageID is a nonce for unique container id calculation
bytes MessageID = 1 [(gogoproto.customtype) = "MessageID", (gogoproto.nullable) = false];
// Capacity defines the amount of data that can be stored in the container (not used for now).
uint64 Capacity = 2;
// OwnerID is a wallet address
bytes OwnerID = 3 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// Rules define storage policy for the object inside the container.
netmap.PlacementRule rules = 4 [(gogoproto.nullable) = false];
// BasicACL of the container.
uint32 BasicACL = 5;
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message PutResponse {
// CID (container id) is a SHA256 hash of the container structure
bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message DeleteRequest {
// CID (container id) is a SHA256 hash of the container structure
bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
// DeleteResponse is empty because the delete operation is asynchronous and done
// via consensus in the inner ring nodes
message DeleteResponse { }
message GetRequest {
// CID (container id) is a SHA256 hash of the container structure
bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetResponse {
// Container is a structure that contains placement rules and owner id
container.Container Container = 1;
}
message ListRequest {
// OwnerID is a wallet address
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message ListResponse {
// CID (container id) is list of SHA256 hashes of the container structures
repeated bytes CID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message ExtendedACLKey {
// ID (container id) is a SHA256 hash of the container structure
bytes ID = 1 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}
message ExtendedACLValue {
// EACL carries binary representation of the table of extended ACL rules
bytes EACL = 1;
// Signature carries EACL field signature
bytes Signature = 2;
}
message SetExtendedACLRequest {
// Key carries key to extended ACL information
ExtendedACLKey Key = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// Value carries extended ACL information
ExtendedACLValue Value = 2 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message SetExtendedACLResponse {}
message GetExtendedACLRequest {
// Key carries key to extended ACL information
ExtendedACLKey Key = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetExtendedACLResponse {
// ACL carries extended ACL information
ExtendedACLValue ACL = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
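
For context, a hedged sketch of calling this service through the protoc-generated gRPC client (assuming the usual NewServiceClient constructor is emitted for it); the endpoint is a placeholder, and a real request must additionally carry meta headers and a signature produced by service.SignRequestData, which is omitted here.

package main

import (
    "context"
    "log"

    "github.com/nspcc-dev/neofs-api-go/container"
    "google.golang.org/grpc"
)

func main() {
    // Endpoint is a placeholder; WithInsecure is only for the sketch.
    conn, err := grpc.Dial("node.example.com:8080", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    cli := container.NewServiceClient(conn) // assumed protoc-generated constructor

    req := new(container.GetRequest)
    req.SetCID(container.CID{}) // real code puts an actual container id here
    // A real request also needs meta headers (TTL, epoch) and a signature
    // via service.SignRequestData before a node will accept it.

    resp, err := cli.Get(context.Background(), req)
    if err != nil {
        log.Fatal(err)
    }
    log.Println(resp.GetContainer())
}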

View file

@ -1,194 +0,0 @@
package container
import (
"encoding/binary"
"io"
service "github.com/nspcc-dev/neofs-api-go/service"
)
var requestEndianness = binary.BigEndian
// SignedData returns payload bytes of the request.
func (m PutRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m PutRequest) SignedDataSize() (sz int) {
sz += m.GetMessageID().Size()
sz += 8
sz += m.GetOwnerID().Size()
rules := m.GetRules()
sz += rules.Size()
sz += 4
return
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m PutRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetMessageID().Bytes())
requestEndianness.PutUint64(p[off:], m.GetCapacity())
off += 8
off += copy(p[off:], m.GetOwnerID().Bytes())
rules := m.GetRules()
// FIXME: implement and use stable functions
n, err := rules.MarshalTo(p[off:])
off += n
if err != nil {
return off, err
}
requestEndianness.PutUint32(p[off:], m.GetBasicACL())
off += 4
return off, nil
}
// SignedData returns payload bytes of the request.
func (m DeleteRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m DeleteRequest) SignedDataSize() int {
return m.GetCID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m DeleteRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetCID().Bytes())
return off, nil
}
// SignedData returns payload bytes of the request.
func (m GetRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m GetRequest) SignedDataSize() int {
return m.GetCID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m GetRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetCID().Bytes())
return off, nil
}
// SignedData returns payload bytes of the request.
func (m ListRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m ListRequest) SignedDataSize() int {
return m.GetOwnerID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m ListRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetOwnerID().Bytes())
return off, nil
}
// SignedData returns payload bytes of the request.
func (m GetExtendedACLRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m GetExtendedACLRequest) SignedDataSize() int {
return m.GetID().Size()
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m GetExtendedACLRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
return off, nil
}
// SignedData returns payload bytes of the request.
func (m SetExtendedACLRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// SignedDataSize returns payload size of the request.
func (m SetExtendedACLRequest) SignedDataSize() int {
return 0 +
m.GetID().Size() +
len(m.GetEACL()) +
len(m.GetSignature())
}
// ReadSignedData copies payload bytes to the passed buffer.
//
// If the passed buffer is too small, io.ErrUnexpectedEOF is returned.
func (m SetExtendedACLRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.GetID().Bytes())
off += copy(p[off:], m.GetEACL())
off += copy(p[off:], m.GetSignature())
return off, nil
}

View file

@ -1,187 +0,0 @@
package container
import (
"testing"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestRequestSign(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*service.Token)
}
items := []struct {
constructor func() sigType
payloadCorrupt []func(sigType)
}{
{ // PutRequest
constructor: func() sigType {
return new(PutRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*PutRequest)
id := req.GetMessageID()
id[0]++
req.SetMessageID(id)
},
func(s sigType) {
req := s.(*PutRequest)
req.SetCapacity(req.GetCapacity() + 1)
},
func(s sigType) {
req := s.(*PutRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
func(s sigType) {
req := s.(*PutRequest)
rules := req.GetRules()
rules.ReplFactor++
req.SetRules(rules)
},
func(s sigType) {
req := s.(*PutRequest)
req.SetBasicACL(req.GetBasicACL() + 1)
},
},
},
{ // DeleteRequest
constructor: func() sigType {
return new(DeleteRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*DeleteRequest)
cid := req.GetCID()
cid[0]++
req.SetCID(cid)
},
},
},
{ // GetRequest
constructor: func() sigType {
return new(GetRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*GetRequest)
cid := req.GetCID()
cid[0]++
req.SetCID(cid)
},
},
},
{ // ListRequest
constructor: func() sigType {
return new(ListRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*ListRequest)
owner := req.GetOwnerID()
owner[0]++
req.SetOwnerID(owner)
},
},
},
{ // GetExtendedACLRequest
constructor: func() sigType {
return new(GetExtendedACLRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*GetExtendedACLRequest)
id := req.GetID()
id[0]++
req.SetID(id)
},
},
},
{ // SetExtendedACLRequest
constructor: func() sigType {
return new(SetExtendedACLRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
req := s.(*SetExtendedACLRequest)
id := req.GetID()
id[0]++
req.SetID(id)
},
func(s sigType) {
req := s.(*SetExtendedACLRequest)
req.SetEACL(
append(req.GetEACL(), 1),
)
},
func(s sigType) {
req := s.(*SetExtendedACLRequest)
req.SetSignature(
append(req.GetSignature(), 1),
)
},
},
},
}
for _, item := range items {
{ // token corruptions
v := item.constructor()
token := new(service.Token)
v.SetToken(token)
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
corruption(v)
require.Error(t, service.VerifyRequestData(v))
}
}
}
}

View file

@ -1,188 +0,0 @@
package container
import (
"bytes"
"github.com/gogo/protobuf/proto"
"github.com/google/uuid"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
)
var (
_ internal.Custom = (*Container)(nil)
emptySalt = (UUID{}).Bytes()
emptyOwner = (OwnerID{}).Bytes()
)
// New creates new user container based on capacity, OwnerID, ACL and PlacementRules.
func New(cap uint64, owner OwnerID, acl uint32, rules netmap.PlacementRule) (*Container, error) {
if bytes.Equal(owner[:], emptyOwner) {
return nil, refs.ErrEmptyOwner
} else if cap == 0 {
return nil, refs.ErrEmptyCapacity
}
salt, err := uuid.NewRandom()
if err != nil {
return nil, errors.Wrap(err, "could not create salt")
}
return &Container{
OwnerID: owner,
Salt: UUID(salt),
Capacity: cap,
Rules: rules,
BasicACL: acl,
}, nil
}
// Bytes returns bytes representation of Container.
func (m *Container) Bytes() []byte {
data, err := m.Marshal()
if err != nil {
return nil
}
return data
}
// ID returns generated ContainerID based on Container (data).
func (m *Container) ID() (CID, error) {
if m.Empty() {
return CID{}, refs.ErrEmptyContainer
}
data, err := m.Marshal()
if err != nil {
return CID{}, err
}
return refs.CIDForBytes(data), nil
}
// Merge is used by proto.Clone.
func (m *Container) Merge(src proto.Message) {
if tmp, ok := src.(*Container); ok {
*m = *tmp
}
}
// Empty checks that container is empty.
func (m *Container) Empty() bool {
return m.Capacity == 0 || bytes.Equal(m.Salt.Bytes(), emptySalt) || bytes.Equal(m.OwnerID.Bytes(), emptyOwner)
}
// -- Test container definition -- //
// NewTestContainer returns test container.
// WARNING: DON'T USE THIS OUTSIDE TESTS.
func NewTestContainer() (*Container, error) {
key := test.DecodeKey(0)
owner, err := refs.NewOwnerID(&key.PublicKey)
if err != nil {
return nil, err
}
return New(100, owner, 0xFFFFFFFF, netmap.PlacementRule{
ReplFactor: 2,
SFGroups: []netmap.SFGroup{
{
Selectors: []netmap.Select{
{Key: "Country", Count: 1},
{Key: netmap.NodesBucket, Count: 2},
},
Filters: []netmap.Filter{
{Key: "Country", F: netmap.FilterIn("USA")},
},
},
},
})
}
// GetMessageID is a MessageID field getter.
func (m PutRequest) GetMessageID() MessageID {
return m.MessageID
}
// SetMessageID is a MessageID field setter.
func (m *PutRequest) SetMessageID(id MessageID) {
m.MessageID = id
}
// SetCapacity is a Capacity field setter.
func (m *PutRequest) SetCapacity(c uint64) {
m.Capacity = c
}
// GetOwnerID is an OwnerID field getter.
func (m PutRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *PutRequest) SetOwnerID(owner OwnerID) {
m.OwnerID = owner
}
// SetRules is a Rules field setter.
func (m *PutRequest) SetRules(rules netmap.PlacementRule) {
m.Rules = rules
}
// SetBasicACL is a BasicACL field setter.
func (m *PutRequest) SetBasicACL(acl uint32) {
m.BasicACL = acl
}
// GetCID is a CID field getter.
func (m DeleteRequest) GetCID() CID {
return m.CID
}
// SetCID is a CID field setter.
func (m *DeleteRequest) SetCID(cid CID) {
m.CID = cid
}
// GetCID is a CID field getter.
func (m GetRequest) GetCID() CID {
return m.CID
}
// SetCID is a CID field setter.
func (m *GetRequest) SetCID(cid CID) {
m.CID = cid
}
// GetOwnerID is an OwnerID field getter.
func (m ListRequest) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *ListRequest) SetOwnerID(owner OwnerID) {
m.OwnerID = owner
}
// GetID is an ID field getter.
func (m ExtendedACLKey) GetID() CID {
return m.ID
}
// SetID is an ID field setter.
func (m *ExtendedACLKey) SetID(v CID) {
m.ID = v
}
// SetEACL is an EACL field setter.
func (m *ExtendedACLValue) SetEACL(v []byte) {
m.EACL = v
}
// SetSignature is a Signature field setter.
func (m *ExtendedACLValue) SetSignature(sig []byte) {
m.Signature = sig
}
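
A minimal sketch of the intended flow, mirroring NewTestContainer above: derive an OwnerID from a key, build a container with New, then compute its content-addressed CID. The key generation and the concrete capacity/ACL values are illustrative assumptions.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/container"
    "github.com/nspcc-dev/neofs-api-go/refs"
    "github.com/nspcc-dev/netmap"
)

func main() {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    owner, err := refs.NewOwnerID(&key.PublicKey)
    if err != nil {
        panic(err)
    }

    // 100 capacity units, wide-open basic ACL, replication factor 2.
    cnr, err := container.New(100, owner, 0xFFFFFFFF, netmap.PlacementRule{ReplFactor: 2})
    if err != nil {
        panic(err)
    }

    cid, err := cnr.ID() // SHA256-based container id
    if err != nil {
        panic(err)
    }
    fmt.Println(cid)
}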

View file

@ -1,507 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: container/types.proto
package container
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
netmap "github.com/nspcc-dev/netmap"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The Container message definition.
type Container struct {
// OwnerID is a wallet address.
OwnerID OwnerID `protobuf:"bytes,1,opt,name=OwnerID,proto3,customtype=OwnerID" json:"OwnerID"`
// Salt is a nonce for unique container id calculation.
Salt UUID `protobuf:"bytes,2,opt,name=Salt,proto3,customtype=UUID" json:"Salt"`
// Capacity defines the amount of data that can be stored in the container (not used for now).
Capacity uint64 `protobuf:"varint,3,opt,name=Capacity,proto3" json:"Capacity,omitempty"`
// Rules define storage policy for the object inside the container.
Rules netmap.PlacementRule `protobuf:"bytes,4,opt,name=Rules,proto3" json:"Rules"`
// BasicACL with access control rules for owner, system, others and
// permission bits for bearer token and extended ACL.
BasicACL uint32 `protobuf:"varint,5,opt,name=BasicACL,proto3" json:"BasicACL,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Container) Reset() { *m = Container{} }
func (m *Container) String() string { return proto.CompactTextString(m) }
func (*Container) ProtoMessage() {}
func (*Container) Descriptor() ([]byte, []int) {
return fileDescriptor_1432e52ab0b53e3e, []int{0}
}
func (m *Container) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Container) XXX_Merge(src proto.Message) {
xxx_messageInfo_Container.Merge(m, src)
}
func (m *Container) XXX_Size() int {
return m.Size()
}
func (m *Container) XXX_DiscardUnknown() {
xxx_messageInfo_Container.DiscardUnknown(m)
}
var xxx_messageInfo_Container proto.InternalMessageInfo
func (m *Container) GetCapacity() uint64 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Container) GetRules() netmap.PlacementRule {
if m != nil {
return m.Rules
}
return netmap.PlacementRule{}
}
func (m *Container) GetBasicACL() uint32 {
if m != nil {
return m.BasicACL
}
return 0
}
func init() {
proto.RegisterType((*Container)(nil), "container.Container")
}
func init() { proto.RegisterFile("container/types.proto", fileDescriptor_1432e52ab0b53e3e) }
var fileDescriptor_1432e52ab0b53e3e = []byte{
// 315 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0xce, 0xcf, 0x2b,
0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0xd2, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f,
0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x4b, 0x69, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7,
0xe7, 0xea, 0xe7, 0x15, 0x17, 0x24, 0x27, 0xeb, 0xa6, 0xa4, 0x96, 0xe9, 0xe7, 0xa5, 0x96, 0xe4,
0x26, 0x16, 0xe8, 0x17, 0xa7, 0xe6, 0xa4, 0x26, 0x97, 0xe4, 0x17, 0x41, 0xb4, 0x49, 0xe9, 0x22,
0xa9, 0x4d, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x0b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98,
0x05, 0x51, 0xae, 0x74, 0x98, 0x91, 0x8b, 0xd3, 0x19, 0x66, 0x91, 0x90, 0x26, 0x17, 0xbb, 0x7f,
0x79, 0x5e, 0x6a, 0x91, 0xa7, 0x8b, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x8f, 0x13, 0xff, 0x89, 0x7b,
0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0xc3, 0x84, 0x83, 0x60, 0x0c, 0x21, 0x05, 0x2e, 0x96, 0xe0, 0xc4,
0x9c, 0x12, 0x09, 0x26, 0xb0, 0x3a, 0x1e, 0xa8, 0x3a, 0x96, 0xd0, 0x50, 0x4f, 0x97, 0x20, 0xb0,
0x8c, 0x90, 0x14, 0x17, 0x87, 0x73, 0x62, 0x41, 0x62, 0x72, 0x66, 0x49, 0xa5, 0x04, 0xb3, 0x02,
0xa3, 0x06, 0x4b, 0x10, 0x9c, 0x2f, 0x64, 0xc8, 0xc5, 0x1a, 0x54, 0x9a, 0x93, 0x5a, 0x2c, 0xc1,
0xa2, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xaa, 0x07, 0xf1, 0x8c, 0x5e, 0x40, 0x4e, 0x62, 0x72, 0x6a,
0x6e, 0x6a, 0x5e, 0x09, 0x48, 0xd6, 0x89, 0x05, 0x64, 0x6a, 0x10, 0x44, 0x25, 0xc8, 0x38, 0xa7,
0xc4, 0xe2, 0xcc, 0x64, 0x47, 0x67, 0x1f, 0x09, 0x56, 0x05, 0x46, 0x0d, 0xde, 0x20, 0x38, 0xdf,
0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x6f, 0x3c, 0x92, 0x63, 0x7c, 0xf0,
0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x6d, 0x1c, 0xc1, 0x96, 0x9f, 0x56, 0xac, 0x9b,
0x58, 0x90, 0xa9, 0x9b, 0x9e, 0xaf, 0x0f, 0x0f, 0xe3, 0x55, 0x4c, 0xc2, 0x7e, 0xa9, 0xf9, 0x6e,
0xc1, 0x7a, 0x8e, 0x01, 0x9e, 0x7a, 0xf0, 0x00, 0x49, 0x62, 0x03, 0x87, 0x92, 0x31, 0x20, 0x00,
0x00, 0xff, 0xff, 0xfc, 0x81, 0x55, 0xd6, 0xa4, 0x01, 0x00, 0x00,
}
func (m *Container) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Container) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.BasicACL != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.BasicACL))
i--
dAtA[i] = 0x28
}
{
size, err := m.Rules.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
if m.Capacity != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Capacity))
i--
dAtA[i] = 0x18
}
{
size := m.Salt.Size()
i -= size
if _, err := m.Salt.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.OwnerID.Size()
i -= size
if _, err := m.OwnerID.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Container) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.OwnerID.Size()
n += 1 + l + sovTypes(uint64(l))
l = m.Salt.Size()
n += 1 + l + sovTypes(uint64(l))
if m.Capacity != 0 {
n += 1 + sovTypes(uint64(m.Capacity))
}
l = m.Rules.Size()
n += 1 + l + sovTypes(uint64(l))
if m.BasicACL != 0 {
n += 1 + sovTypes(uint64(m.BasicACL))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTypes(x uint64) (n int) {
return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Container) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Container: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OwnerID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.OwnerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Salt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
}
m.Capacity = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Capacity |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Rules.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BasicACL", wireType)
}
m.BasicACL = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.BasicACL |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTypes
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTypes
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTypes
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
)

View file

@ -1,24 +0,0 @@
syntax = "proto3";
package container;
option go_package = "github.com/nspcc-dev/neofs-api-go/container";
option csharp_namespace = "NeoFS.API.Container";
import "github.com/nspcc-dev/netmap/selector.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// The Container message definition.
message Container {
// OwnerID is a wallet address.
bytes OwnerID = 1 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// Salt is a nonce for unique container id calculation.
bytes Salt = 2 [(gogoproto.customtype) = "UUID", (gogoproto.nullable) = false];
// Capacity defines the amount of data that can be stored in the container (not used for now).
uint64 Capacity = 3;
// Rules define storage policy for the object inside the container.
netmap.PlacementRule Rules = 4 [(gogoproto.nullable) = false];
// BasicACL with access control rules for owner, system, others and
// permission bits for bearer token and extended ACL.
uint32 BasicACL = 5;
}

View file

@ -1,162 +0,0 @@
package container
import (
"testing"
"github.com/gogo/protobuf/proto"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/nspcc-dev/netmap"
"github.com/stretchr/testify/require"
)
func TestCID(t *testing.T) {
t.Run("check that marshal/unmarshal works like expected", func(t *testing.T) {
var (
c2 Container
cid2 CID
key = test.DecodeKey(0)
)
rules := netmap.PlacementRule{
ReplFactor: 2,
SFGroups: []netmap.SFGroup{
{
Selectors: []netmap.Select{
{Key: "Country", Count: 1},
{Key: netmap.NodesBucket, Count: 2},
},
Filters: []netmap.Filter{
{Key: "Country", F: netmap.FilterIn("USA")},
},
},
},
}
owner, err := refs.NewOwnerID(&key.PublicKey)
require.NoError(t, err)
c1, err := New(10, owner, 0xDEADBEEF, rules)
require.NoError(t, err)
data, err := proto.Marshal(c1)
require.NoError(t, err)
require.NoError(t, c2.Unmarshal(data))
require.Equal(t, c1, &c2)
cid1, err := c1.ID()
require.NoError(t, err)
data, err = proto.Marshal(&cid1)
require.NoError(t, err)
require.NoError(t, cid2.Unmarshal(data))
require.Equal(t, cid1, cid2)
})
}
func TestPutRequestGettersSetters(t *testing.T) {
t.Run("owner", func(t *testing.T) {
owner := OwnerID{1, 2, 3}
m := new(PutRequest)
m.SetOwnerID(owner)
require.Equal(t, owner, m.GetOwnerID())
})
t.Run("capacity", func(t *testing.T) {
cp := uint64(3)
m := new(PutRequest)
m.SetCapacity(cp)
require.Equal(t, cp, m.GetCapacity())
})
t.Run("message ID", func(t *testing.T) {
id, err := refs.NewUUID()
require.NoError(t, err)
m := new(PutRequest)
m.SetMessageID(id)
require.Equal(t, id, m.GetMessageID())
})
t.Run("rules", func(t *testing.T) {
rules := netmap.PlacementRule{
ReplFactor: 1,
}
m := new(PutRequest)
m.SetRules(rules)
require.Equal(t, rules, m.GetRules())
})
t.Run("basic ACL", func(t *testing.T) {
bACL := uint32(5)
m := new(PutRequest)
m.SetBasicACL(bACL)
require.Equal(t, bACL, m.GetBasicACL())
})
}
func TestDeleteRequestGettersSetters(t *testing.T) {
t.Run("cid", func(t *testing.T) {
cid := CID{1, 2, 3}
m := new(DeleteRequest)
m.SetCID(cid)
require.Equal(t, cid, m.GetCID())
})
}
func TestGetRequestGettersSetters(t *testing.T) {
t.Run("cid", func(t *testing.T) {
cid := CID{1, 2, 3}
m := new(GetRequest)
m.SetCID(cid)
require.Equal(t, cid, m.GetCID())
})
}
func TestListRequestGettersSetters(t *testing.T) {
t.Run("owner", func(t *testing.T) {
owner := OwnerID{1, 2, 3}
m := new(PutRequest)
m.SetOwnerID(owner)
require.Equal(t, owner, m.GetOwnerID())
})
}
func TestExtendedACLKey(t *testing.T) {
s := new(ExtendedACLKey)
id := CID{1, 2, 3}
s.SetID(id)
require.Equal(t, id, s.GetID())
}
func TestExtendedACLValue(t *testing.T) {
s := new(ExtendedACLValue)
acl := []byte{1, 2, 3}
s.SetEACL(acl)
require.Equal(t, acl, s.GetEACL())
sig := []byte{4, 5, 6}
s.SetSignature(sig)
require.Equal(t, sig, s.GetSignature())
}

View file

@ -1,110 +0,0 @@
package decimal
import (
"math"
"strconv"
"strings"
)
// GASPrecision contains precision for NEO Gas token.
const GASPrecision = 8
// Zero is empty Decimal value.
var Zero = &Decimal{}
// New returns new Decimal (in satoshi).
func New(v int64) *Decimal {
return NewWithPrecision(v, GASPrecision)
}
// NewGAS returns new Decimal * 1e8 (in GAS).
func NewGAS(v int64) *Decimal {
v *= int64(math.Pow10(GASPrecision))
return NewWithPrecision(v, GASPrecision)
}
// NewWithPrecision returns new Decimal with custom precision.
func NewWithPrecision(v int64, p uint32) *Decimal {
return &Decimal{Value: v, Precision: p}
}
// ParseFloat returns a new Decimal parsed from float64 * 1e8 (in GAS).
func ParseFloat(v float64) *Decimal {
return new(Decimal).Parse(v, GASPrecision)
}
// ParseFloatWithPrecision returns a new Decimal parsed from float64 * 10^p.
func ParseFloatWithPrecision(v float64, p int) *Decimal {
return new(Decimal).Parse(v, p)
}
// Copy returns copy of current Decimal.
func (m *Decimal) Copy() *Decimal { return &Decimal{Value: m.Value, Precision: m.Precision} }
// Parse returns a Decimal parsed from float64 * 10^p.
func (m *Decimal) Parse(v float64, p int) *Decimal {
m.Value = int64(v * math.Pow10(p))
m.Precision = uint32(p)
return m
}
// String returns string representation of Decimal.
func (m Decimal) String() string {
buf := new(strings.Builder)
val := m.Value
dec := int64(math.Pow10(int(m.Precision)))
if val < 0 {
buf.WriteRune('-')
val = -val
}
str := strconv.FormatInt(val/dec, 10)
buf.WriteString(str)
val %= dec
if val > 0 {
buf.WriteRune('.')
str = strconv.FormatInt(val, 10)
for i := len(str); i < int(m.Precision); i++ {
buf.WriteRune('0')
}
buf.WriteString(strings.TrimRight(str, "0"))
}
return buf.String()
}
// Add returns d + m.
func (m Decimal) Add(d *Decimal) *Decimal {
precision := m.Precision
if precision < d.Precision {
precision = d.Precision
}
return &Decimal{
Value: m.Value + d.Value,
Precision: precision,
}
}
// Zero checks that Decimal is empty.
func (m Decimal) Zero() bool { return m.Value == 0 }
// Equal checks that current Decimal is equal to passed Decimal.
func (m Decimal) Equal(v *Decimal) bool { return m.Value == v.Value && m.Precision == v.Precision }
// GT checks that m > v.
func (m Decimal) GT(v *Decimal) bool { return m.Value > v.Value }
// GTE checks that m >= v.
func (m Decimal) GTE(v *Decimal) bool { return m.Value >= v.Value }
// LT checks that m < v.
func (m Decimal) LT(v *Decimal) bool { return m.Value < v.Value }
// LTE checks that m <= v.
func (m Decimal) LTE(v *Decimal) bool { return m.Value <= v.Value }
// Neg returns negative representation of current Decimal (m * -1).
func (m Decimal) Neg() *Decimal {
return &Decimal{
Value: m.Value * -1,
Precision: m.Precision,
}
}
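
A short usage sketch of the helpers above; the amounts are arbitrary example values.

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/decimal"
)

func main() {
    a := decimal.New(150000000) // 1.5 GAS expressed in the smallest units (1e8 precision)
    b := decimal.NewGAS(2)      // 2 GAS, stored internally as 2 * 1e8

    sum := a.Add(b)
    fmt.Println(sum.String())         // "3.5"
    fmt.Println(sum.GT(decimal.Zero)) // true
    fmt.Println(sum.Neg().String())   // "-3.5"
}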

View file

@ -1,349 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: decimal/decimal.proto
package decimal
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Decimal is a structure used to represent an asset amount
type Decimal struct {
// Value is the numeric value
Value int64 `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"`
// Precision is the number of decimal places
Precision uint32 `protobuf:"varint,2,opt,name=Precision,proto3" json:"Precision,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Decimal) Reset() { *m = Decimal{} }
func (*Decimal) ProtoMessage() {}
func (*Decimal) Descriptor() ([]byte, []int) {
return fileDescriptor_e7e70e1773836c80, []int{0}
}
func (m *Decimal) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Decimal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Decimal) XXX_Merge(src proto.Message) {
xxx_messageInfo_Decimal.Merge(m, src)
}
func (m *Decimal) XXX_Size() int {
return m.Size()
}
func (m *Decimal) XXX_DiscardUnknown() {
xxx_messageInfo_Decimal.DiscardUnknown(m)
}
var xxx_messageInfo_Decimal proto.InternalMessageInfo
func (m *Decimal) GetValue() int64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Decimal) GetPrecision() uint32 {
if m != nil {
return m.Precision
}
return 0
}
func init() {
proto.RegisterType((*Decimal)(nil), "decimal.Decimal")
}
func init() { proto.RegisterFile("decimal/decimal.proto", fileDescriptor_e7e70e1773836c80) }
var fileDescriptor_e7e70e1773836c80 = []byte{
// 201 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x49, 0x4d, 0xce,
0xcc, 0x4d, 0xcc, 0xd1, 0x87, 0xd2, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0xae,
0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x7a,
0xbe, 0x3e, 0x58, 0x3e, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, 0x2c, 0x88, 0x3e, 0x25, 0x67,
0x2e, 0x76, 0x17, 0x88, 0x4e, 0x21, 0x11, 0x2e, 0xd6, 0xb0, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x46,
0x05, 0x46, 0x0d, 0xe6, 0x20, 0x08, 0x47, 0x48, 0x86, 0x8b, 0x33, 0xa0, 0x28, 0x35, 0x39, 0xb3,
0x38, 0x33, 0x3f, 0x4f, 0x82, 0x49, 0x81, 0x51, 0x83, 0x37, 0x08, 0x21, 0x60, 0xc5, 0x32, 0x63,
0x81, 0x3c, 0x83, 0x53, 0xf0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0xde, 0x78, 0x24,
0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x51, 0x9a, 0x48, 0x2e, 0xc9, 0x2b,
0x2e, 0x48, 0x4e, 0xd6, 0x4d, 0x49, 0x2d, 0xd3, 0xcf, 0x4b, 0xcd, 0x4f, 0x2b, 0xd6, 0x4d, 0x2c,
0xc8, 0xd4, 0x4d, 0xcf, 0x87, 0xf9, 0x61, 0x15, 0x93, 0xa0, 0x5f, 0x6a, 0xbe, 0x5b, 0xb0, 0x9e,
0x63, 0x80, 0xa7, 0x1e, 0xd4, 0x39, 0x49, 0x6c, 0x60, 0x07, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff,
0xff, 0xa5, 0x41, 0x4c, 0x9f, 0xf1, 0x00, 0x00, 0x00,
}
func (m *Decimal) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Decimal) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Decimal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Precision != 0 {
i = encodeVarintDecimal(dAtA, i, uint64(m.Precision))
i--
dAtA[i] = 0x10
}
if m.Value != 0 {
i = encodeVarintDecimal(dAtA, i, uint64(m.Value))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintDecimal(dAtA []byte, offset int, v uint64) int {
offset -= sovDecimal(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Decimal) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Value != 0 {
n += 1 + sovDecimal(uint64(m.Value))
}
if m.Precision != 0 {
n += 1 + sovDecimal(uint64(m.Precision))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovDecimal(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozDecimal(x uint64) (n int) {
return sovDecimal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
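// Worked example of the size math above for the "100 GAS" value used in the
// tests below, Decimal{Value: 1e10, Precision: 8}:
//
//	sovDecimal(1e10) = (bits.Len64(1e10|1) + 6) / 7 = (34 + 6) / 7 = 5 // 5-byte varint
//	sovDecimal(8)    = (bits.Len64(8|1) + 6) / 7    = (4 + 6) / 7  = 1 // 1-byte varint
//	Size()           = (1 + 5) + (1 + 1)            = 8               // one tag byte per field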
func (m *Decimal) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDecimal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Decimal: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Decimal: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
m.Value = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDecimal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Value |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Precision", wireType)
}
m.Precision = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDecimal
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Precision |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDecimal(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDecimal
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthDecimal
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipDecimal(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDecimal
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDecimal
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDecimal
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthDecimal
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupDecimal
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthDecimal
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthDecimal = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowDecimal = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupDecimal = fmt.Errorf("proto: unexpected end of group")
)


@@ -1,18 +0,0 @@
syntax = "proto3";
package decimal;
option go_package = "github.com/nspcc-dev/neofs-api-go/decimal";
option csharp_namespace = "NeoFS.API.Decimal";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Decimal is a structure used to represent an amount of assets.
message Decimal {
option (gogoproto.goproto_stringer) = false;
// Value is the numeric value.
int64 Value = 1;
// Precision is the number of decimal places.
uint32 Precision = 2;
}


@@ -1,445 +0,0 @@
package decimal
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestDecimal_Parse(t *testing.T) {
tests := []struct {
value float64
name string
expect *Decimal
}{
{name: "empty", expect: &Decimal{Precision: GASPrecision}},
{
value: 100,
name: "100 GAS",
expect: &Decimal{Value: 1e10, Precision: GASPrecision},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.expect, ParseFloat(tt.value))
})
}
}
func TestDecimal_ParseWithPrecision(t *testing.T) {
type args struct {
v float64
p int
}
tests := []struct {
args args
name string
expect *Decimal
}{
{name: "empty", expect: &Decimal{}},
{
name: "empty precision",
expect: &Decimal{Value: 0, Precision: 0},
},
{
name: "100 GAS",
args: args{100, GASPrecision},
expect: &Decimal{Value: 1e10, Precision: GASPrecision},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.expect,
ParseFloatWithPrecision(tt.args.v, tt.args.p))
})
}
}
func TestNew(t *testing.T) {
tests := []struct {
name string
val int64
expect *Decimal
}{
{name: "empty", expect: &Decimal{Value: 0, Precision: GASPrecision}},
{name: "100 GAS", val: 1e10, expect: &Decimal{Value: 1e10, Precision: GASPrecision}},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equalf(t, tt.expect, New(tt.val), tt.name)
})
}
}
func TestNewGAS(t *testing.T) {
tests := []struct {
name string
val int64
expect *Decimal
}{
{name: "empty", expect: &Decimal{Value: 0, Precision: GASPrecision}},
{name: "100 GAS", val: 100, expect: &Decimal{Value: 1e10, Precision: GASPrecision}},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equalf(t, tt.expect, NewGAS(tt.val), tt.name)
})
}
}
func TestNewWithPrecision(t *testing.T) {
tests := []struct {
name string
val int64
pre uint32
expect *Decimal
}{
{name: "empty", expect: &Decimal{}},
{name: "100 GAS", val: 1e10, pre: GASPrecision, expect: &Decimal{Value: 1e10, Precision: GASPrecision}},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equalf(t, tt.expect, NewWithPrecision(tt.val, tt.pre), tt.name)
})
}
}
func TestDecimal_Neg(t *testing.T) {
tests := []struct {
name string
val int64
expect *Decimal
}{
{name: "empty", expect: &Decimal{Value: 0, Precision: GASPrecision}},
{name: "100 GAS", val: 1e10, expect: &Decimal{Value: -1e10, Precision: GASPrecision}},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Equalf(t, tt.expect, New(tt.val).Neg(), tt.name)
}, tt.name)
})
}
}
func TestDecimal_String(t *testing.T) {
tests := []struct {
name string
expect string
value *Decimal
}{
{name: "empty", expect: "0", value: &Decimal{}},
{name: "100 GAS", expect: "100", value: &Decimal{Value: 1e10, Precision: GASPrecision}},
{name: "-100 GAS", expect: "-100", value: &Decimal{Value: -1e10, Precision: GASPrecision}},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.Equalf(t, tt.expect, tt.value.String(), tt.name)
})
}
}
const SomethingElsePrecision = 5
func TestDecimal_Add(t *testing.T) {
tests := []struct {
name string
expect *Decimal
values [2]*Decimal
}{
{name: "empty", expect: &Decimal{}, values: [2]*Decimal{{}, {}}},
{
name: "5 GAS + 2 GAS",
expect: &Decimal{Value: 7e8, Precision: GASPrecision},
values: [2]*Decimal{
{Value: 2e8, Precision: GASPrecision},
{Value: 5e8, Precision: GASPrecision},
},
},
{
name: "1e2 + 1e3",
expect: &Decimal{Value: 1.1e3, Precision: 3},
values: [2]*Decimal{
{Value: 1e2, Precision: 2},
{Value: 1e3, Precision: 3},
},
},
{
name: "5 GAS + 10 SomethingElse",
expect: &Decimal{Value: 5.01e8, Precision: GASPrecision},
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 1e6, Precision: SomethingElsePrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
{ // A + B
one := tt.values[0]
two := tt.values[1]
require.Equalf(t, tt.expect, one.Add(two), tt.name)
t.Log(one.Add(two))
}
{ // B + A
one := tt.values[0]
two := tt.values[1]
require.Equalf(t, tt.expect, two.Add(one), tt.name)
t.Log(two.Add(one))
}
}, tt.name)
})
}
}
func TestDecimal_Copy(t *testing.T) {
tests := []struct {
name string
expect *Decimal
value *Decimal
}{
{name: "zero", expect: Zero},
{
name: "5 GAS",
expect: &Decimal{Value: 5e8, Precision: GASPrecision},
},
{
name: "100 GAS",
expect: &Decimal{Value: 1e10, Precision: GASPrecision},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Equal(t, tt.expect, tt.expect.Copy())
}, tt.name)
})
}
}
func TestDecimal_Zero(t *testing.T) {
tests := []struct {
name string
expect bool
value *Decimal
}{
{name: "zero", expect: true, value: Zero},
{
name: "5 GAS",
expect: false,
value: &Decimal{Value: 5e8, Precision: GASPrecision},
},
{
name: "100 GAS",
expect: false,
value: &Decimal{Value: 1e10, Precision: GASPrecision},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == tt.value.Zero(), tt.name)
}, tt.name)
})
}
}
func TestDecimal_Equal(t *testing.T) {
tests := []struct {
name string
expect bool
values [2]*Decimal
}{
{name: "zero == zero", expect: true, values: [2]*Decimal{Zero, Zero}},
{
name: "5 GAS != 2 GAS",
expect: false,
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 2e8, Precision: GASPrecision},
},
},
{
name: "100 GAS == 100 GAS",
expect: true,
values: [2]*Decimal{
{Value: 1e10, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == (tt.values[0].Equal(tt.values[1])), tt.name)
}, tt.name)
})
}
}
func TestDecimal_GT(t *testing.T) {
tests := []struct {
name string
expect bool
values [2]*Decimal
}{
{name: "two zeros", expect: false, values: [2]*Decimal{Zero, Zero}},
{
name: "5 GAS > 2 GAS",
expect: true,
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 2e8, Precision: GASPrecision},
},
},
{
name: "100 GAS !> 100 GAS",
expect: false,
values: [2]*Decimal{
{Value: 1e10, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == (tt.values[0].GT(tt.values[1])), tt.name)
}, tt.name)
})
}
}
func TestDecimal_GTE(t *testing.T) {
tests := []struct {
name string
expect bool
values [2]*Decimal
}{
{name: "two zeros", expect: true, values: [2]*Decimal{Zero, Zero}},
{
name: "5 GAS >= 2 GAS",
expect: true,
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 2e8, Precision: GASPrecision},
},
},
{
name: "1 GAS !>= 100 GAS",
expect: false,
values: [2]*Decimal{
{Value: 1e8, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == (tt.values[0].GTE(tt.values[1])), tt.name)
}, tt.name)
})
}
}
func TestDecimal_LT(t *testing.T) {
tests := []struct {
name string
expect bool
values [2]*Decimal
}{
{name: "two zeros", expect: false, values: [2]*Decimal{Zero, Zero}},
{
name: "5 GAS !< 2 GAS",
expect: false,
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 2e8, Precision: GASPrecision},
},
},
{
name: "1 GAS < 100 GAS",
expect: true,
values: [2]*Decimal{
{Value: 1e8, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
{
name: "100 GAS !< 100 GAS",
expect: false,
values: [2]*Decimal{
{Value: 1e10, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == (tt.values[0].LT(tt.values[1])), tt.name)
}, tt.name)
})
}
}
func TestDecimal_LTE(t *testing.T) {
tests := []struct {
name string
expect bool
values [2]*Decimal
}{
{name: "two zeros", expect: true, values: [2]*Decimal{Zero, Zero}},
{
name: "5 GAS <= 2 GAS",
expect: false,
values: [2]*Decimal{
{Value: 5e8, Precision: GASPrecision},
{Value: 2e8, Precision: GASPrecision},
},
},
{
name: "1 GAS <= 100 GAS",
expect: true,
values: [2]*Decimal{
{Value: 1e8, Precision: GASPrecision},
{Value: 1e10, Precision: GASPrecision},
},
},
{
name: "100 GAS !<= 1 GAS",
expect: false,
values: [2]*Decimal{
{Value: 1e10, Precision: GASPrecision},
{Value: 1e8, Precision: GASPrecision},
},
},
}
for i := range tests {
tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
require.NotPanicsf(t, func() {
require.Truef(t, tt.expect == (tt.values[0].LTE(tt.values[1])), tt.name)
}, tt.name)
})
}
}


@@ -1,106 +0,0 @@
package hash
import (
"bytes"
"github.com/gogo/protobuf/proto"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/tzhash/tz"
"github.com/pkg/errors"
)
// HomomorphicHashSize is the size of a homomorphic hash in bytes.
const HomomorphicHashSize = 64
// Hash is an implementation of HomomorphicHash.
type Hash [HomomorphicHashSize]byte
// ErrWrongDataSize is raised when a byte slice of the wrong size is passed to unmarshal a Hash.
const ErrWrongDataSize = internal.Error("wrong data size")
var (
_ internal.Custom = (*Hash)(nil)
emptyHH [HomomorphicHashSize]byte
)
// Size returns size of Hash (HomomorphicHashSize).
func (h Hash) Size() int { return HomomorphicHashSize }
// Empty checks that Hash is empty.
func (h Hash) Empty() bool { return bytes.Equal(h.Bytes(), emptyHH[:]) }
// Reset sets current Hash to empty value.
func (h *Hash) Reset() { *h = Hash{} }
// ProtoMessage method to satisfy proto.Message interface.
func (h Hash) ProtoMessage() {}
// Bytes returns a copy of Hash as a byte slice.
func (h Hash) Bytes() []byte {
buf := make([]byte, HomomorphicHashSize)
copy(buf, h[:])
return buf
}
// Marshal returns bytes representation of Hash.
func (h Hash) Marshal() ([]byte, error) { return h.Bytes(), nil }
// MarshalTo tries to marshal Hash into passed bytes and returns count of copied bytes.
func (h *Hash) MarshalTo(data []byte) (int, error) { return copy(data, h.Bytes()), nil }
// Unmarshal tries to parse bytes into valid Hash.
func (h *Hash) Unmarshal(data []byte) error {
if ln := len(data); ln != HomomorphicHashSize {
return errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", HomomorphicHashSize, ln)
}
copy((*h)[:], data)
return nil
}
// String returns string representation of Hash.
func (h Hash) String() string { return base58.Encode(h[:]) }
// Equal checks that current Hash is equal to passed Hash.
func (h Hash) Equal(hash Hash) bool { return h == hash }
// Verify checks whether the current hash was generated from the passed data.
func (h Hash) Verify(data []byte) bool { return h.Equal(Sum(data)) }
// Validate checks if combined hashes are equal to current Hash.
func (h Hash) Validate(hashes []Hash) bool {
hashBytes := make([][]byte, 0, len(hashes))
for i := range hashes {
hashBytes = append(hashBytes, hashes[i].Bytes())
}
ok, err := tz.Validate(h.Bytes(), hashBytes)
return err == nil && ok
}
// Merge is used by proto.Clone.
func (h *Hash) Merge(src proto.Message) {
if tmp, ok := src.(*Hash); ok {
*h = *tmp
}
}
// Sum returns Tillich-Zémor checksum of data.
func Sum(data []byte) Hash { return tz.Sum(data) }
// Concat combines hashes based on homomorphic property.
func Concat(hashes []Hash) (Hash, error) {
var (
hash Hash
h = make([][]byte, 0, len(hashes))
)
for i := range hashes {
h = append(h, hashes[i].Bytes())
}
cat, err := tz.Concat(h)
if err != nil {
return hash, err
}
return hash, hash.Unmarshal(cat)
}
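// Usage sketch (standalone illustration, not part of the original file): the
// homomorphic property lets hashes of payload pieces be combined into the hash
// of the whole payload, mirroring the tests below.
package main

import (
	"fmt"

	"github.com/nspcc-dev/neofs-api-go/hash"
)

func main() {
	data := []byte("Hello world")
	whole := hash.Sum(data) // Tillich-Zémor hash of the whole payload

	// Hash two pieces separately and combine them homomorphically.
	parts := []hash.Hash{hash.Sum(data[:5]), hash.Sum(data[5:])}
	combined, err := hash.Concat(parts)

	fmt.Println(err == nil && whole.Equal(combined)) // true
	fmt.Println(whole.Validate(parts))               // true
}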


@@ -1,166 +0,0 @@
package hash
import (
"bytes"
"crypto/rand"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
func Test_Sum(t *testing.T) {
var (
data = []byte("Hello world")
sum = Sum(data)
hash = []byte{0, 0, 0, 0, 1, 79, 16, 173, 134, 90, 176, 77, 114, 165, 253, 114, 0, 0, 0, 0, 0, 148,
172, 222, 98, 248, 15, 99, 205, 129, 66, 91, 0, 0, 0, 0, 0, 138, 173, 39, 228, 231, 239, 123,
170, 96, 186, 61, 0, 0, 0, 0, 0, 90, 69, 237, 131, 90, 161, 73, 38, 164, 185, 55}
)
require.Equal(t, hash, sum.Bytes())
}
func Test_Validate(t *testing.T) {
var (
data = []byte("Hello world")
hash = Sum(data)
pieces = splitData(data, 2)
ln = len(pieces)
hashes = make([]Hash, 0, ln)
)
for i := 0; i < ln; i++ {
hashes = append(hashes, Sum(pieces[i]))
}
require.True(t, hash.Validate(hashes))
}
func Test_Concat(t *testing.T) {
var (
data = []byte("Hello world")
hash = Sum(data)
pieces = splitData(data, 2)
ln = len(pieces)
hashes = make([]Hash, 0, ln)
)
for i := 0; i < ln; i++ {
hashes = append(hashes, Sum(pieces[i]))
}
res, err := Concat(hashes)
require.NoError(t, err)
require.Equal(t, hash, res)
}
func Test_HashChunks(t *testing.T) {
var (
chars = []byte("+")
size = 1400
data = bytes.Repeat(chars, size)
hash = Sum(data)
count = 150
)
hashes, err := dataHashes(data, count)
require.NoError(t, err)
require.Len(t, hashes, count)
require.True(t, hash.Validate(hashes))
// 100 / 150 = 0
hashes, err = dataHashes(data[:100], count)
require.Error(t, err)
require.Nil(t, hashes)
}
func TestXOR(t *testing.T) {
var (
dl = 10
data = make([]byte, dl)
)
_, err := rand.Read(data)
require.NoError(t, err)
t.Run("XOR with <nil> salt", func(t *testing.T) {
res := SaltXOR(data, nil)
require.Equal(t, res, data)
})
t.Run("XOR with empty salt", func(t *testing.T) {
xorWithSalt(t, data, 0)
})
t.Run("XOR with salt same data size", func(t *testing.T) {
xorWithSalt(t, data, dl)
})
t.Run("XOR with salt shorter than data aliquot", func(t *testing.T) {
xorWithSalt(t, data, dl/2)
})
t.Run("XOR with salt shorter than data aliquant", func(t *testing.T) {
xorWithSalt(t, data, dl/3+1)
})
t.Run("XOR with salt longer than data aliquot", func(t *testing.T) {
xorWithSalt(t, data, dl*2)
})
t.Run("XOR with salt longer than data aliquant", func(t *testing.T) {
xorWithSalt(t, data, dl*2-1)
})
}
func xorWithSalt(t *testing.T, data []byte, saltSize int) {
var (
direct, reverse []byte
salt = make([]byte, saltSize)
)
_, err := rand.Read(salt)
require.NoError(t, err)
direct = SaltXOR(data, salt)
require.Len(t, direct, len(data))
reverse = SaltXOR(direct, salt)
require.Len(t, reverse, len(data))
require.Equal(t, reverse, data)
}
func splitData(buf []byte, lim int) [][]byte {
var piece []byte
pieces := make([][]byte, 0, len(buf)/lim+1)
for len(buf) >= lim {
piece, buf = buf[:lim], buf[lim:]
pieces = append(pieces, piece)
}
if len(buf) > 0 {
pieces = append(pieces, buf)
}
return pieces
}
func dataHashes(data []byte, count int) ([]Hash, error) {
var (
ln = len(data)
mis = ln / count
off = (count - 1) * mis
hashes = make([]Hash, 0, count)
)
if mis == 0 {
return nil, errors.Errorf("could not split %d bytes to %d pieces", ln, count)
}
pieces := splitData(data[:off], mis)
pieces = append(pieces, data[off:])
for i := 0; i < count; i++ {
hashes = append(hashes, Sum(pieces[i]))
}
return hashes, nil
}


@@ -1,20 +0,0 @@
package hash
import (
"bytes"
)
// HashesSlice is a collection that satisfies sort.Interface and can be
// sorted by the routines in sort package.
type HashesSlice []Hash
// -- HashesSlice -- an inner type to sort Hashes
// Len is the number of elements in the collection.
func (hs HashesSlice) Len() int { return len(hs) }
// Less reports whether the element with
// index i should be sorted before the element with index j.
func (hs HashesSlice) Less(i, j int) bool { return bytes.Compare(hs[i].Bytes(), hs[j].Bytes()) == -1 }
// Swap swaps the elements with indexes i and j.
func (hs HashesSlice) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
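// Usage sketch (illustration only): HashesSlice plugs into the standard sort
// package, ordering hashes by bytes.Compare over their raw bytes.
//
//	hs := HashesSlice{Sum([]byte("b")), Sum([]byte("a"))}
//	sort.Sort(hs)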


@@ -1,17 +0,0 @@
package hash
// SaltXOR XORs the bytes of data with the salt,
// repeating the salt if necessary.
func SaltXOR(data, salt []byte) (result []byte) {
result = make([]byte, len(data))
ls := len(salt)
if ls == 0 {
copy(result, data)
return
}
for i := range result {
result[i] = data[i] ^ salt[i%ls]
}
return
}
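// Illustration of the involution property checked by the XOR tests below:
// salting twice with the same salt restores the original data.
//
//	salted := SaltXOR(data, salt)
//	restored := SaltXOR(salted, salt) // bytes.Equal(restored, data) == true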


@@ -1,7 +0,0 @@
package internal
// Error is a custom error.
type Error string
// Error is an implementation of the error interface.
func (e Error) Error() string { return string(e) }
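// Usage sketch: a string-based error type lets packages declare errors as
// untyped constants, the pattern used throughout this repository, e.g.:
//
//	const ErrNotFound = internal.Error("could not find object")
//
//	if err == ErrNotFound {
//		// comparable like any other constant value
//	}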


@@ -1,19 +0,0 @@
package internal
import "github.com/gogo/protobuf/proto"
// Custom contains methods to satisfy proto.Message
// including custom methods to satisfy protobuf for
// non-proto defined types.
type Custom interface {
Size() int
Empty() bool
Bytes() []byte
Marshal() ([]byte, error)
MarshalTo(data []byte) (int, error)
Unmarshal(data []byte) error
proto.Message
// Merger is required for proto.Clone to work.
proto.Merger
}


@@ -1,143 +0,0 @@
/*
Package object manages the main storage structure in the system. All storage
operations are performed with objects. During its lifetime an object might be
transformed into another object by cutting its payload or adding meta
information. All transformations may be reversed, therefore the source object
can be restored.
Object structure
An object consists of a Payload and Headers. Payload size is unlimited, but
storage nodes may have a policy to store objects with a limited payload. In
this case an object with a large payload will be transformed into a chain of
objects with small payloads.
Headers are simple key-value fields divided into two groups: system headers
and extended headers. System headers contain information about protocol
version, object id, payload length in bytes, owner id, container id and object
creation timestamp (both in epochs and unix time). All these fields must be
set in a correct object.
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| System Headers |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| Version : 1 |
| Payload Length : 21673465 |
| Object ID : 465208e2-ba4f-4f99-ad47-82a59f4192d4 |
| Owner ID : AShvoCbSZ7VfRiPkVb1tEcBLiJrcbts1tt |
| Container ID : FGobtRZA6sBZv2i9k4L7TiTtnuP6E788qa278xfj3Fxj |
| Created At : Epoch#10, 1573033162 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| Extended Headers |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| User Header : <user-defined-key>, <user-defined-value> |
| Verification Header : <session public key>, <owner's signature> |
| Homomorphic Hash : 0x23d35a56ae... |
| Payload Checksum : 0x1bd34abs75... |
| Integrity Header : <header checksum>, <session signature> |
| Transformation : Payload Split |
| Link-parent : cae08935-b4ba-499a-bf6c-98276c1e6c0b |
| Link-next : c3b40fbf-3798-4b61-a189-2992b5fb5070 |
| Payload Checksum : 0x1f387a5c36... |
| Integrity Header : <header checksum>, <session signature> |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| Payload |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
| 0xd1581963a342d231... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
There are different kinds of extended headers. A correct object must contain
a verification header, a homomorphic hash header, a payload checksum and an
integrity header. The order of the headers matters. Let's look through all
these headers.
The link header points to connected objects. During object transformation, a
large object might be transformed into a chain of smaller objects. One of
these objects drops its payload and has several "Child" links. We call this
object the zero-object. The others have a "Parent" link to the zero-object,
and "Previous" and "Next" links in the payload chain.
[ Object ID:1 ] = > transformed
`- [ Zero-Object ID:1 ]
`- Link-child ID:2
`- Link-child ID:3
`- Link-child ID:4
`- Payload [null]
`- [ Object ID:2 ]
`- Link-parent ID:1
`- Link-next ID:3
`- Payload [ 0x13ba... ]
`- [ Object ID:3 ]
`- Link-parent ID:1
`- Link-previous ID:2
`- Link-next ID:4
`- Payload [ 0xcd34... ]
`- [ Object ID:4 ]
`- Link-parent ID:1
`- Link-previous ID:3
`- Payload [ 0xef86... ]
Storage groups are also objects. They have "Storage Group" links to all
objects in the group. Links are set by nodes during transformations and,
in general, they should not be set by the user manually.
Redirect headers are not used yet; they will be implemented and described
later.
A user header is a key-value pair of strings that can be defined by the user.
These headers can be used as search attributes; any meta information about
the object can be stored there, e.g. the object's nickname.
A transformation header notifies that the object was transformed in some
pre-defined way. This header is set before the object is transformed, and all
headers added after the transformation must be located after the
transformation header. During reverse transformation, all headers below the
transformation header will be cut out.
+-+-+-+-+-+-+-+-+-+- +-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+
| Payload checksum | | Payload checksum | | Payload checksum |
| Integrity header | => | Integrity header | + | Integrity header |
+-+-+-+-+-+-+-+-+-+- | Transformation | | Transformation |
| Large payload | | New Checksum | | New Checksum |
+-+-+-+-+-+-+-+-+-+- | New Integrity | | New Integrity |
+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+
| Small payload | | Small payload |
+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+
For now, we use only one type of transformation: payload split transformation.
This header is set by the node automatically.
The tombstone header notifies that this object was deleted by the user.
Objects with a tombstone header do not have a payload, but they still contain
meta information in the headers. This way we implement a two-phase commit for
object removal. Storage nodes will eventually delete all tombstone objects.
If you want to delete an object, you must create a new object with the same
object id, a tombstone header, correct signatures and no payload.
The verification header contains session information. To put an object into
the system the user must create a session. This is required because objects
might be transformed and therefore must be re-signed. To do that, the node
creates a pair of session public and private keys. The object owner delegates
permission to re-sign objects by signing the session public key. This header
contains the session public key and the owner's signature of this key. You
must specify this header manually.
The homomorphic hash header contains the homomorphic hash of the source
object. Transformations do not affect this header. It is used by data audit
and set by the node automatically.
The payload checksum contains the checksum of the actual object payload.
Every payload transformation must set a new payload checksum header. This
header is set by the node automatically.
The integrity header contains the checksum of the headers and the signature
of the session key. It must be last in the list of extended headers. The
checksum is calculated by marshaling all the headers above, including system
headers. This header is set by the node automatically.
The storage group header is present in storage group objects. It contains
information for data audit: the size of the validated data, the homomorphic
hash of this data, and the storage group expiration time in epochs or unix
time.
*/
package object
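// Standalone sketch (illustration only, separate from this package file): a
// zero-object carrying child links instead of payload, as described above.
// The field and type names are the ones exercised by the tests in this package.
package main

import (
	"fmt"

	"github.com/nspcc-dev/neofs-api-go/object"
)

func main() {
	zero := &object.Object{
		SystemHeader: object.SystemHeader{
			ID:            object.ID{1},
			CID:           object.CID{2},
			OwnerID:       object.OwnerID{3},
			PayloadLength: 42, // length of the original payload, which was cut out
		},
		Headers: []object.Header{
			{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: object.ID{4}}}},
			{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: object.ID{5}}}},
		},
	}

	fmt.Println(zero.IsLinking())                   // true: non-zero length, empty payload, child links
	fmt.Println(len(zero.Links(object.Link_Child))) // 2
}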


@@ -1,64 +0,0 @@
package object
// todo: all extensions must be transferred to the separate util library
import "github.com/nspcc-dev/neofs-api-go/storagegroup"
// IsLinking checks if the object has child links to other objects.
// We have to check the payload size because a zero-object must have zero
// payload and a non-zero payload length field in the system header.
func (m Object) IsLinking() bool {
for i := range m.Headers {
switch v := m.Headers[i].Value.(type) {
case *Header_Link:
if v.Link.GetType() == Link_Child {
return m.SystemHeader.PayloadLength > 0 && len(m.Payload) == 0
}
}
}
return false
}
// Links returns slice of ids of specified link type
func (m *Object) Links(t Link_Type) []ID {
var res []ID
for i := range m.Headers {
switch v := m.Headers[i].Value.(type) {
case *Header_Link:
if v.Link.GetType() == t {
res = append(res, v.Link.ID)
}
}
}
return res
}
// Tombstone returns the tombstone header if it is present in the extended headers.
func (m Object) Tombstone() *Tombstone {
_, h := m.LastHeader(HeaderType(TombstoneHdr))
if h != nil {
return h.Value.(*Header_Tombstone).Tombstone
}
return nil
}
// IsTombstone checks if object has tombstone header.
func (m Object) IsTombstone() bool {
n, _ := m.LastHeader(HeaderType(TombstoneHdr))
return n != -1
}
// StorageGroup returns the storage group structure if it is present in the extended headers.
func (m Object) StorageGroup() (*storagegroup.StorageGroup, error) {
_, sgHdr := m.LastHeader(HeaderType(StorageGroupHdr))
if sgHdr == nil {
return nil, ErrHeaderNotFound
}
return sgHdr.Value.(*Header_StorageGroup).StorageGroup, nil
}
// SetStorageGroup sets storage group header in the object.
// It will replace existing storage group header or add a new one.
func (m *Object) SetStorageGroup(group *storagegroup.StorageGroup) {
m.SetHeader(&Header{Value: &Header_StorageGroup{StorageGroup: group}})
}
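// Usage sketch (illustration only), in the spirit of the storage group tests
// below: attach a storage group to an object and read it back. The sg fields
// here are placeholders.
//
//	sg := &storagegroup.StorageGroup{ValidationDataSize: size, ValidationHash: hash.Sum(data)}
//	obj.SetStorageGroup(sg)
//	got, err := obj.StorageGroup() // got == sg when err == nil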


@@ -1,196 +0,0 @@
package object
import (
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/session"
)
type (
// ID is a type alias of object id.
ID = refs.ObjectID
// CID is a type alias of container id.
CID = refs.CID
// SGID is a type alias of storage group id.
SGID = refs.SGID
// OwnerID is a type alias of owner id.
OwnerID = refs.OwnerID
// Hash is a type alias of Homomorphic hash.
Hash = hash.Hash
// Token is a type alias of session token.
Token = session.Token
// Request defines object RPC requests.
// All object operations must have TTL, Epoch, Type, Container ID and
// permission to use the previous network map.
Request interface {
service.SeizedRequestMetaContainer
CID() CID
Type() RequestType
AllowPreviousNetMap() bool
}
)
const (
// starts enum for amount of bytes.
_ int64 = 1 << (10 * iota)
// UnitsKB defines amount of bytes in one kilobyte.
UnitsKB
// UnitsMB defines amount of bytes in one megabyte.
UnitsMB
// UnitsGB defines amount of bytes in one gigabyte.
UnitsGB
// UnitsTB defines amount of bytes in one terabyte.
UnitsTB
)
const (
// ErrNotFound is raised when object is not found in the system.
ErrNotFound = internal.Error("could not find object")
// ErrHeaderExpected is raised when first message in protobuf stream does not contain user header.
ErrHeaderExpected = internal.Error("expected header as a first message in stream")
// KeyStorageGroup is a key to search objects by storage group id.
KeyStorageGroup = "STORAGE_GROUP"
// KeyNoChildren is a key to search objects that have no child links.
KeyNoChildren = "LEAF"
// KeyParent is a key to search objects by the id of the parent object.
KeyParent = "PARENT"
// KeyHasParent is a key to search objects that have a parent link.
KeyHasParent = "HAS_PAR"
// KeyTombstone is a key to search objects that have a tombstone header.
KeyTombstone = "TOMBSTONE"
// KeyChild is a key to search objects by the id of a child link.
KeyChild = "CHILD"
// KeyPrev is a key to search objects by the id of the previous link.
KeyPrev = "PREV"
// KeyNext is a key to search objects by the id of the next link.
KeyNext = "NEXT"
// KeyID is a key to search objects by object id.
KeyID = "ID"
// KeyCID is a key to search objects by container id.
KeyCID = "CID"
// KeyOwnerID is a key to search objects by owner id.
KeyOwnerID = "OWNERID"
// KeyRootObject is a key to search objects that are zero-objects or do
// not have any children.
KeyRootObject = "ROOT_OBJECT"
)
func checkIsNotFull(v interface{}) bool {
var obj *Object
switch t := v.(type) {
case *GetResponse:
obj = t.GetObject()
case *PutRequest:
if h := t.GetHeader(); h != nil {
obj = h.Object
}
default:
panic("unknown type")
}
return obj == nil || obj.SystemHeader.PayloadLength != uint64(len(obj.Payload)) && !obj.IsLinking()
}
// NotFull checks if protobuf stream provided whole object for get operation.
func (m *GetResponse) NotFull() bool { return checkIsNotFull(m) }
// NotFull checks if protobuf stream provided whole object for put operation.
func (m *PutRequest) NotFull() bool { return checkIsNotFull(m) }
// CID returns container id value from object put request.
func (m *PutRequest) CID() (cid CID) {
if header := m.GetHeader(); header == nil {
return
} else if obj := header.GetObject(); obj == nil {
return
} else {
return obj.SystemHeader.CID
}
}
// CID returns container id value from object get request.
func (m *GetRequest) CID() CID { return m.Address.CID }
// CID returns container id value from object head request.
func (m *HeadRequest) CID() CID { return m.Address.CID }
// CID returns container id value from object search request.
func (m *SearchRequest) CID() CID { return m.ContainerID }
// CID returns container id value from object delete request.
func (m *DeleteRequest) CID() CID { return m.Address.CID }
// CID returns container id value from object get range request.
func (m *GetRangeRequest) CID() CID { return m.Address.CID }
// CID returns container id value from object get range hash request.
func (m *GetRangeHashRequest) CID() CID { return m.Address.CID }
// AllowPreviousNetMap returns permission to use previous network map in object put request.
func (m *PutRequest) AllowPreviousNetMap() bool { return false }
// AllowPreviousNetMap returns permission to use previous network map in object get request.
func (m *GetRequest) AllowPreviousNetMap() bool { return true }
// AllowPreviousNetMap returns permission to use previous network map in object head request.
func (m *HeadRequest) AllowPreviousNetMap() bool { return true }
// AllowPreviousNetMap returns permission to use previous network map in object search request.
func (m *SearchRequest) AllowPreviousNetMap() bool { return true }
// AllowPreviousNetMap returns permission to use previous network map in object delete request.
func (m *DeleteRequest) AllowPreviousNetMap() bool { return false }
// AllowPreviousNetMap returns permission to use previous network map in object get range request.
func (m *GetRangeRequest) AllowPreviousNetMap() bool { return false }
// AllowPreviousNetMap returns permission to use previous network map in object get range hash request.
func (m *GetRangeHashRequest) AllowPreviousNetMap() bool { return false }
// Type returns type of the object put request.
func (m *PutRequest) Type() RequestType { return RequestPut }
// Type returns type of the object get request.
func (m *GetRequest) Type() RequestType { return RequestGet }
// Type returns type of the object head request.
func (m *HeadRequest) Type() RequestType { return RequestHead }
// Type returns type of the object search request.
func (m *SearchRequest) Type() RequestType { return RequestSearch }
// Type returns type of the object delete request.
func (m *DeleteRequest) Type() RequestType { return RequestDelete }
// Type returns type of the object get range request.
func (m *GetRangeRequest) Type() RequestType { return RequestRange }
// Type returns type of the object get range hash request.
func (m *GetRangeHashRequest) Type() RequestType { return RequestRangeHash }
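// Usage sketch (illustration only): every request type above satisfies the
// Request interface, so generic routing code can treat them uniformly.
//
//	func describe(r object.Request) string {
//		return fmt.Sprintf("type=%d cid=%v prevNetmap=%t", r.Type(), r.CID(), r.AllowPreviousNetMap())
//	}
//
//	describe(new(object.GetRequest)) // RequestGet, previous netmap allowed
//	describe(new(object.PutRequest)) // RequestPut, current netmap only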

File diff suppressed because it is too large


@@ -1,197 +0,0 @@
syntax = "proto3";
package object;
option go_package = "github.com/nspcc-dev/neofs-api-go/object";
option csharp_namespace = "NeoFS.API.Object";
import "refs/types.proto";
import "object/types.proto";
import "service/meta.proto";
import "service/verify.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Object service provides API for manipulating with the object.
service Service {
// Get the object from container. Response uses gRPC stream. The first
// response message carries the object at the requested address. Chunk
// messages are parts of the object's payload if it is needed. All messages
// except the first carry chunks. The requested object can be restored by
// concatenating the object message payload and all chunks, keeping the
// receiving order.
rpc Get(GetRequest) returns (stream GetResponse);
// Put the object into container. Request uses gRPC stream. The first message
// SHOULD BE of type PutHeader. Container id and Owner id of the object SHOULD
// BE set. A session token SHOULD BE obtained before the put operation (see
// the session package). Chunk messages are considered by the server as parts
// of the object payload. All messages except the first SHOULD BE chunks.
// Chunk messages SHOULD BE sent in the direct order of fragmentation.
rpc Put(stream PutRequest) returns (PutResponse);
// Delete the object from a container
rpc Delete(DeleteRequest) returns (DeleteResponse);
// Head returns the object without data payload. Object in the
// response has system header only. If full headers flag is set, extended
// headers are also present.
rpc Head(HeadRequest) returns (HeadResponse);
// Search objects in container. The version of the query language format
// SHOULD BE set to 1. The search query is represented in serialized format
// (see the query package).
rpc Search(SearchRequest) returns (stream SearchResponse);
// GetRange of data payload. Range is a pair (offset, length).
// The requested range can be restored by concatenating all chunks,
// keeping the receiving order.
rpc GetRange(GetRangeRequest) returns (stream GetRangeResponse);
// GetRangeHash returns the homomorphic hash of an object payload range after
// a XOR operation. Ranges are a set of pairs (offset, length). The order of
// hashes in the response corresponds to the order of ranges in the request.
// The homomorphic hash is calculated over the XORed data.
rpc GetRangeHash(GetRangeHashRequest) returns (GetRangeHashResponse);
}
message GetRequest {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetResponse {
oneof R {
// Object header and some payload
Object object = 1;
// Chunk of remaining payload
bytes Chunk = 2;
}
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message PutRequest {
message PutHeader {
// Object with at least container id and owner id fields
Object Object = 1;
// Number of the object copies to store within the RPC call (zero is processed according to the placement rules)
uint32 CopiesNumber = 2;
}
oneof R {
// Header should be the first message in the stream
PutHeader Header = 1;
// Chunk messages should be the remaining messages in the stream
bytes Chunk = 2;
}
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message PutResponse {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false];
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message DeleteRequest {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false];
// OwnerID is a wallet address
bytes OwnerID = 2 [(gogoproto.nullable) = false, (gogoproto.customtype) = "OwnerID"];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
// DeleteResponse is empty because we cannot guarantee permanent object removal
// in distributed system.
message DeleteResponse {
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message HeadRequest {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "Address"];
// FullHeaders can be set true for extended headers in the object
bool FullHeaders = 2;
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message HeadResponse {
// Object without payload
Object Object = 1;
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message SearchRequest {
// ContainerID for searching the object
bytes ContainerID = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "CID"];
// Query in the binary serialized format
bytes Query = 2;
// QueryVersion is a version of search query format
uint32 QueryVersion = 3;
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message SearchResponse {
// Addresses of found objects
repeated refs.Address Addresses = 1 [(gogoproto.nullable) = false];
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetRangeRequest {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false];
// Range of object's payload to return
Range Range = 2 [(gogoproto.nullable) = false];
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetRangeResponse {
// Fragment of object's payload
bytes Fragment = 1;
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetRangeHashRequest {
// Address of object (container id + object id)
refs.Address Address = 1 [(gogoproto.nullable) = false];
// Ranges of object's payload to calculate homomorphic hash
repeated Range Ranges = 2 [(gogoproto.nullable) = false];
// Salt is used to XOR object's payload ranges before hashing, it can be nil
bytes Salt = 3;
// RequestMetaHeader contains information about request meta headers (should be embedded into message)
service.RequestMetaHeader Meta = 98 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
// RequestVerificationHeader is a set of signatures of every NeoFS Node that processed request (should be embedded into message)
service.RequestVerificationHeader Verify = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
message GetRangeHashResponse {
// Hashes are the homomorphic hashes of all ranges
repeated bytes Hashes = 1 [(gogoproto.customtype) = "Hash", (gogoproto.nullable) = false];
// ResponseMetaHeader contains meta information based on request processing by server (should be embedded into message)
service.ResponseMetaHeader Meta = 99 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
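// Client-side sketch in Go of the Put protocol described above (header first,
// then payload chunks). It assumes the gogo-generated ServiceClient for this
// service with the usual gRPC streaming signatures; request signing
// (service.SignRequestData) and meta headers are omitted for brevity.
package client // hypothetical example package

import (
	"context"

	"google.golang.org/grpc"

	"github.com/nspcc-dev/neofs-api-go/object"
)

// putObject streams obj and its payload to a node behind conn. The object is
// expected to already carry container id, owner id and a session token.
func putObject(ctx context.Context, conn *grpc.ClientConn, obj *object.Object, payload []byte) (*object.PutResponse, error) {
	stream, err := object.NewServiceClient(conn).Put(ctx)
	if err != nil {
		return nil, err
	}

	// First message: the PutHeader with the object itself (no payload yet).
	if err := stream.Send(object.MakePutRequestHeader(obj)); err != nil {
		return nil, err
	}

	// Remaining messages: payload chunks in the direct order of fragmentation.
	const chunkSize = 1 << 20 // arbitrary chunk size for the sketch
	for off := 0; off < len(payload); off += chunkSize {
		end := off + chunkSize
		if end > len(payload) {
			end = len(payload)
		}
		if err := stream.Send(object.MakePutRequestChunk(payload[off:end])); err != nil {
			return nil, err
		}
	}

	// Close the sending side and receive the address of the stored object.
	return stream.CloseAndRecv()
}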


@@ -1,43 +0,0 @@
package object
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestRequest(t *testing.T) {
cases := []Request{
&PutRequest{},
&GetRequest{},
&HeadRequest{},
&SearchRequest{},
&DeleteRequest{},
&GetRangeRequest{},
&GetRangeHashRequest{},
MakePutRequestHeader(nil),
MakePutRequestHeader(&Object{}),
}
types := []RequestType{
RequestPut,
RequestGet,
RequestHead,
RequestSearch,
RequestDelete,
RequestRange,
RequestRangeHash,
RequestPut,
RequestPut,
}
for i := range cases {
v := cases[i]
t.Run(fmt.Sprintf("%T_%d", v, i), func(t *testing.T) {
require.NotPanics(t, func() { v.CID() })
require.Equal(t, types[i], v.Type())
})
}
}


@@ -1,43 +0,0 @@
package object
import (
"sort"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
)
// Here are defined getter functions for objects that contain storage group
// information.
var _ storagegroup.Provider = (*Object)(nil)
// Group returns slice of object ids that are part of a storage group.
func (m *Object) Group() []refs.ObjectID {
sgLinks := m.Links(Link_StorageGroup)
sort.Sort(storagegroup.IDList(sgLinks))
return sgLinks
}
// Zones returns validation zones of storage group.
func (m *Object) Zones() []storagegroup.ZoneInfo {
sgInfo, err := m.StorageGroup()
if err != nil {
return nil
}
return []storagegroup.ZoneInfo{
{
Hash: sgInfo.ValidationHash,
Size: sgInfo.ValidationDataSize,
},
}
}
// IDInfo returns meta information about storage group.
func (m *Object) IDInfo() *storagegroup.IdentificationInfo {
return &storagegroup.IdentificationInfo{
SGID: m.SystemHeader.ID,
CID: m.SystemHeader.CID,
OwnerID: m.SystemHeader.OwnerID,
}
}


@@ -1,88 +0,0 @@
package object
import (
"math/rand"
"sort"
"testing"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
"github.com/stretchr/testify/require"
)
func TestObject_StorageGroup(t *testing.T) {
t.Run("group method", func(t *testing.T) {
var linkCount byte = 100
obj := &Object{Headers: make([]Header, 0, linkCount)}
require.Empty(t, obj.Group())
idList := make([]ID, linkCount)
for i := byte(0); i < linkCount; i++ {
idList[i] = ID{i}
obj.Headers = append(obj.Headers, Header{
Value: &Header_Link{Link: &Link{
Type: Link_StorageGroup,
ID: idList[i],
}},
})
}
rand.Shuffle(len(obj.Headers), func(i, j int) { obj.Headers[i], obj.Headers[j] = obj.Headers[j], obj.Headers[i] })
sort.Sort(storagegroup.IDList(idList))
require.Equal(t, idList, obj.Group())
})
t.Run("identification method", func(t *testing.T) {
oid, cid, owner := ID{1}, CID{2}, OwnerID{3}
obj := &Object{
SystemHeader: SystemHeader{
ID: oid,
OwnerID: owner,
CID: cid,
},
}
idInfo := obj.IDInfo()
require.Equal(t, oid, idInfo.SGID)
require.Equal(t, cid, idInfo.CID)
require.Equal(t, owner, idInfo.OwnerID)
})
t.Run("zones method", func(t *testing.T) {
sgSize := uint64(100)
d := make([]byte, sgSize)
_, err := rand.Read(d)
require.NoError(t, err)
sgHash := hash.Sum(d)
obj := &Object{
Headers: []Header{
{
Value: &Header_StorageGroup{
StorageGroup: &storagegroup.StorageGroup{
ValidationDataSize: sgSize,
ValidationHash: sgHash,
},
},
},
},
}
var (
sumSize uint64
zones = obj.Zones()
hashes = make([]Hash, len(zones))
)
for i := range zones {
sumSize += zones[i].Size
hashes[i] = zones[i].Hash
}
sumHash, err := hash.Concat(hashes)
require.NoError(t, err)
require.Equal(t, sgSize, sumSize)
require.Equal(t, sgHash, sumHash)
})
}


@@ -1,272 +0,0 @@
package object
import (
"crypto/ecdsa"
"encoding/binary"
"io"
"github.com/nspcc-dev/neofs-api-go/service"
)
// SignedData returns payload bytes of the request.
//
// If payload is nil, ErrHeaderNotFound returns.
func (m PutRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m PutRequest) ReadSignedData(p []byte) (int, error) {
r := m.GetR()
if r == nil {
return 0, ErrHeaderNotFound
}
return r.MarshalTo(p)
}
// SignedDataSize returns the size of payload of the Put request.
//
// If payload is nil, -1 returns.
func (m PutRequest) SignedDataSize() int {
r := m.GetR()
if r == nil {
return -1
}
return r.Size()
}
// SignedData returns payload bytes of the request.
func (m GetRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m GetRequest) ReadSignedData(p []byte) (int, error) {
addr := m.GetAddress()
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], addr.CID.Bytes())
off += copy(p[off:], addr.ObjectID.Bytes())
return off, nil
}
// SignedDataSize returns payload size of the request.
func (m GetRequest) SignedDataSize() int {
return addressSize(m.GetAddress())
}
// SignedData returns payload bytes of the request.
func (m HeadRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m HeadRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
if m.GetFullHeaders() {
p[0] = 1
} else {
p[0] = 0
}
off := 1
off += copy(p[off:], m.Address.CID.Bytes())
off += copy(p[off:], m.Address.ObjectID.Bytes())
return off, nil
}
// SignedDataSize returns payload size of the request.
func (m HeadRequest) SignedDataSize() int {
return addressSize(m.Address) + 1
}
// SignedData returns payload bytes of the request.
func (m DeleteRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m DeleteRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.OwnerID.Bytes())
off += copy(p[off:], addressBytes(m.Address))
return off, nil
}
// SignedDataSize returns payload size of the request.
func (m DeleteRequest) SignedDataSize() int {
return m.OwnerID.Size() + addressSize(m.Address)
}
// SignedData returns payload bytes of the request.
func (m GetRangeRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m GetRangeRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
n, err := (&m.Range).MarshalTo(p)
if err != nil {
return 0, err
}
n += copy(p[n:], addressBytes(m.GetAddress()))
return n, nil
}
// SignedDataSize returns payload size of the request.
func (m GetRangeRequest) SignedDataSize() int {
return (&m.Range).Size() + addressSize(m.GetAddress())
}
// SignedData returns payload bytes of the request.
func (m GetRangeHashRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m GetRangeHashRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], addressBytes(m.GetAddress()))
off += copy(p[off:], rangeSetBytes(m.GetRanges()))
off += copy(p[off:], m.GetSalt())
return off, nil
}
// SignedDataSize returns payload size of the request.
func (m GetRangeHashRequest) SignedDataSize() int {
var sz int
sz += addressSize(m.GetAddress())
sz += rangeSetSize(m.GetRanges())
sz += len(m.GetSalt())
return sz
}
// SignedData returns payload bytes of the request.
func (m SearchRequest) SignedData() ([]byte, error) {
return service.SignedDataFromReader(m)
}
// ReadSignedData copies payload bytes to passed buffer.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (m SearchRequest) ReadSignedData(p []byte) (int, error) {
if len(p) < m.SignedDataSize() {
return 0, io.ErrUnexpectedEOF
}
var off int
off += copy(p[off:], m.CID().Bytes())
binary.BigEndian.PutUint32(p[off:], m.GetQueryVersion())
off += 4
off += copy(p[off:], m.GetQuery())
return off, nil
}
// SignedDataSize returns payload size of the request.
func (m SearchRequest) SignedDataSize() int {
var sz int
sz += m.CID().Size()
sz += 4 // uint32 Version
sz += len(m.GetQuery())
return sz
}
func rangeSetSize(rs []Range) int {
return 4 + len(rs)*16 // two uint64 fields
}
func rangeSetBytes(rs []Range) []byte {
data := make([]byte, rangeSetSize(rs))
binary.BigEndian.PutUint32(data, uint32(len(rs)))
off := 4
for i := range rs {
binary.BigEndian.PutUint64(data[off:], rs[i].Offset)
off += 8
binary.BigEndian.PutUint64(data[off:], rs[i].Length)
off += 8
}
return data
}
func addressSize(addr Address) int {
return addr.CID.Size() + addr.ObjectID.Size()
}
func addressBytes(addr Address) []byte {
return append(addr.CID.Bytes(), addr.ObjectID.Bytes()...)
}
// SignedData returns the result of the ChecksumSignature field getter.
func (m IntegrityHeader) SignedData() ([]byte, error) {
return m.GetHeadersChecksum(), nil
}
// AddSignKey calls the ChecksumSignature field setter with signature argument.
func (m *IntegrityHeader) AddSignKey(sign []byte, _ *ecdsa.PublicKey) {
m.SetSignature(sign)
}
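// Usage sketch (illustration only), mirroring the test below: SignedData and
// friends define exactly which bytes of a request get signed, while the
// generic helpers from the service package produce and check the signatures.
// Here key is an *ecdsa.PrivateKey of the requester.
//
//	req := new(object.GetRequest)
//	err := service.SignRequestData(key, req) // signs the bytes from SignedData
//	err = service.VerifyRequestData(req)     // nil for an untampered request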


@@ -1,239 +0,0 @@
package object
import (
"crypto/rand"
"testing"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestSignVerifyRequests(t *testing.T) {
sk := test.DecodeKey(0)
type sigType interface {
service.RequestData
service.SignKeyPairAccumulator
service.SignKeyPairSource
SetToken(*Token)
}
items := []struct {
constructor func() sigType
payloadCorrupt []func(sigType)
}{
{ // PutRequest.PutHeader
constructor: func() sigType {
return MakePutRequestHeader(new(Object))
},
payloadCorrupt: []func(sigType){
func(s sigType) {
obj := s.(*PutRequest).GetR().(*PutRequest_Header).Header.GetObject()
obj.SystemHeader.PayloadLength++
},
},
},
{ // PutRequest.Chunk
constructor: func() sigType {
return MakePutRequestChunk(make([]byte, 10))
},
payloadCorrupt: []func(sigType){
func(s sigType) {
h := s.(*PutRequest).GetR().(*PutRequest_Chunk)
h.Chunk[0]++
},
},
},
{ // GetRequest
constructor: func() sigType {
return new(GetRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*GetRequest).Address.CID[0]++
},
func(s sigType) {
s.(*GetRequest).Address.ObjectID[0]++
},
},
},
{ // HeadRequest
constructor: func() sigType {
return new(HeadRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*HeadRequest).Address.CID[0]++
},
func(s sigType) {
s.(*HeadRequest).Address.ObjectID[0]++
},
func(s sigType) {
s.(*HeadRequest).FullHeaders = true
},
},
},
{ // DeleteRequest
constructor: func() sigType {
return new(DeleteRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*DeleteRequest).OwnerID[0]++
},
func(s sigType) {
s.(*DeleteRequest).Address.CID[0]++
},
func(s sigType) {
s.(*DeleteRequest).Address.ObjectID[0]++
},
},
},
{ // GetRangeRequest
constructor: func() sigType {
return new(GetRangeRequest)
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*GetRangeRequest).Range.Length++
},
func(s sigType) {
s.(*GetRangeRequest).Range.Offset++
},
func(s sigType) {
s.(*GetRangeRequest).Address.CID[0]++
},
func(s sigType) {
s.(*GetRangeRequest).Address.ObjectID[0]++
},
},
},
{ // GetRangeHashRequest
constructor: func() sigType {
return &GetRangeHashRequest{
Ranges: []Range{{}},
Salt: []byte{1, 2, 3},
}
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*GetRangeHashRequest).Address.CID[0]++
},
func(s sigType) {
s.(*GetRangeHashRequest).Address.ObjectID[0]++
},
func(s sigType) {
s.(*GetRangeHashRequest).Salt[0]++
},
func(s sigType) {
s.(*GetRangeHashRequest).Ranges[0].Length++
},
func(s sigType) {
s.(*GetRangeHashRequest).Ranges[0].Offset++
},
func(s sigType) {
s.(*GetRangeHashRequest).Ranges = nil
},
},
},
{ // GetRangeHashRequest
constructor: func() sigType {
return &SearchRequest{
Query: []byte{1, 2, 3},
}
},
payloadCorrupt: []func(sigType){
func(s sigType) {
s.(*SearchRequest).ContainerID[0]++
},
func(s sigType) {
s.(*SearchRequest).Query[0]++
},
func(s sigType) {
s.(*SearchRequest).QueryVersion++
},
},
},
}
for _, item := range items {
{ // token corruptions
v := item.constructor()
token := new(Token)
v.SetToken(token)
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
token.SetSessionKey(append(token.GetSessionKey(), 1))
require.Error(t, service.VerifyRequestData(v))
}
{ // payload corruptions
for _, corruption := range item.payloadCorrupt {
v := item.constructor()
require.NoError(t, service.SignRequestData(sk, v))
require.NoError(t, service.VerifyRequestData(v))
corruption(v)
require.Error(t, service.VerifyRequestData(v))
}
}
}
}
func TestHeadRequest_ReadSignedData(t *testing.T) {
t.Run("full headers", func(t *testing.T) {
req := new(HeadRequest)
// unset FullHeaders flag
req.SetFullHeaders(false)
// allocate two different buffers for reading
buf1 := testData(t, req.SignedDataSize())
buf2 := testData(t, req.SignedDataSize())
// read to both buffers
n1, err := req.ReadSignedData(buf1)
require.NoError(t, err)
n2, err := req.ReadSignedData(buf2)
require.NoError(t, err)
require.Equal(t, buf1[:n1], buf2[:n2])
})
}
func testData(t *testing.T, sz int) []byte {
data := make([]byte, sz)
_, err := rand.Read(data)
require.NoError(t, err)
return data
}
func TestIntegrityHeaderSignMethods(t *testing.T) {
// create new IntegrityHeader
s := new(IntegrityHeader)
// set test headers checksum
s.SetHeadersChecksum([]byte{1, 2, 3})
data, err := s.SignedData()
require.NoError(t, err)
require.Equal(t, data, s.GetHeadersChecksum())
// add signature
sig := []byte{4, 5, 6}
s.AddSignKey(sig, nil)
require.Equal(t, sig, s.GetSignature())
}

View file

@ -1,422 +0,0 @@
package object
import (
"bytes"
"context"
"fmt"
"io"
"reflect"
"github.com/gogo/protobuf/proto"
"github.com/nspcc-dev/neofs-api-go/internal"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/pkg/errors"
)
type (
// Pred defines a predicate function that can check if passed header
// satisfies predicate condition. It is used to find headers of
// specific type.
Pred = func(*Header) bool
// Address is a type alias of object Address.
Address = refs.Address
// PositionReader defines object reader that returns slice of bytes
// for specified object and data range.
PositionReader interface {
PRead(ctx context.Context, addr refs.Address, rng Range) ([]byte, error)
}
// RequestType of the object service requests.
RequestType int
headerType int
)
const (
// ErrVerifyPayload is raised when payload checksum cannot be verified.
ErrVerifyPayload = internal.Error("can't verify payload")
// ErrVerifyHeader is raised when object integrity cannot be verified.
ErrVerifyHeader = internal.Error("can't verify header")
// ErrHeaderNotFound is raised when requested header not found.
ErrHeaderNotFound = internal.Error("header not found")
// ErrVerifySignature is raised when signature cannot be verified.
ErrVerifySignature = internal.Error("can't verify signature")
)
const (
_ headerType = iota
// LinkHdr is a link header type.
LinkHdr
// RedirectHdr is a redirect header type.
RedirectHdr
// UserHdr is a user defined header type.
UserHdr
// TransformHdr is a transformation header type.
TransformHdr
// TombstoneHdr is a tombstone header type.
TombstoneHdr
// TokenHdr is a token header type.
TokenHdr
// HomoHashHdr is a homomorphic hash header type.
HomoHashHdr
// PayloadChecksumHdr is a payload checksum header type.
PayloadChecksumHdr
// IntegrityHdr is an integrity header type.
IntegrityHdr
// StorageGroupHdr is a storage group header type.
StorageGroupHdr
// PublicKeyHdr is a public key header type.
PublicKeyHdr
)
const (
_ RequestType = iota
// RequestPut is a type for object put request.
RequestPut
// RequestGet is a type for object get request.
RequestGet
// RequestHead is a type for object head request.
RequestHead
// RequestSearch is a type for object search request.
RequestSearch
// RequestRange is a type for object range request.
RequestRange
// RequestRangeHash is a type for object hash range request.
RequestRangeHash
// RequestDelete is a type for object delete request.
RequestDelete
)
var (
_ internal.Custom = (*Object)(nil)
emptyObject = new(Object).Bytes()
)
// String returns printable name of the request type.
func (s RequestType) String() string {
switch s {
case RequestPut:
return "PUT"
case RequestGet:
return "GET"
case RequestHead:
return "HEAD"
case RequestSearch:
return "SEARCH"
case RequestRange:
return "RANGE"
case RequestRangeHash:
return "RANGE_HASH"
case RequestDelete:
return "DELETE"
default:
return "UNKNOWN"
}
}
// Bytes returns marshaled object in a binary format.
func (m Object) Bytes() []byte { data, _ := m.Marshal(); return data }
// Empty checks if object does not contain any information.
func (m Object) Empty() bool { return bytes.Equal(m.Bytes(), emptyObject) }
// LastHeader returns last header of the specified type. Type must be
// specified as a Pred function.
func (m Object) LastHeader(f Pred) (int, *Header) {
for i := len(m.Headers) - 1; i >= 0; i-- {
if f != nil && f(&m.Headers[i]) {
return i, &m.Headers[i]
}
}
return -1, nil
}
// AddHeader adds passed header to the end of extended header list.
func (m *Object) AddHeader(h *Header) {
m.Headers = append(m.Headers, *h)
}
// SetPayload sets payload field and payload length in the system header.
func (m *Object) SetPayload(payload []byte) {
m.Payload = payload
m.SystemHeader.PayloadLength = uint64(len(payload))
}
// SetHeader replaces existing extended header or adds new one to the end of
// extended header list.
func (m *Object) SetHeader(h *Header) {
// looking for the header of that type
for i := range m.Headers {
if m.Headers[i].typeOf(h.Value) {
// if we found one - set it with new value and return
m.Headers[i] = *h
return
}
}
// if we did not find one - add this header
m.AddHeader(h)
}
// Merge used by proto.Clone
func (m *Object) Merge(src proto.Message) {
if tmp, ok := src.(*Object); ok {
tmp.CopyTo(m)
}
}
func (m Header) typeOf(t isHeader_Value) (ok bool) {
switch t.(type) {
case *Header_Link:
_, ok = m.Value.(*Header_Link)
case *Header_Redirect:
_, ok = m.Value.(*Header_Redirect)
case *Header_UserHeader:
_, ok = m.Value.(*Header_UserHeader)
case *Header_Transform:
_, ok = m.Value.(*Header_Transform)
case *Header_Tombstone:
_, ok = m.Value.(*Header_Tombstone)
case *Header_Token:
_, ok = m.Value.(*Header_Token)
case *Header_HomoHash:
_, ok = m.Value.(*Header_HomoHash)
case *Header_PayloadChecksum:
_, ok = m.Value.(*Header_PayloadChecksum)
case *Header_Integrity:
_, ok = m.Value.(*Header_Integrity)
case *Header_StorageGroup:
_, ok = m.Value.(*Header_StorageGroup)
case *Header_PublicKey:
_, ok = m.Value.(*Header_PublicKey)
}
return
}
// HeaderType returns a predicate that checks whether an extended header
// is a header of the specified type.
func HeaderType(t headerType) Pred {
switch t {
case LinkHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Link); return ok }
case RedirectHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Redirect); return ok }
case UserHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_UserHeader); return ok }
case TransformHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Transform); return ok }
case TombstoneHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Tombstone); return ok }
case TokenHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Token); return ok }
case HomoHashHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_HomoHash); return ok }
case PayloadChecksumHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_PayloadChecksum); return ok }
case IntegrityHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_Integrity); return ok }
case StorageGroupHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_StorageGroup); return ok }
case PublicKeyHdr:
return func(h *Header) bool { _, ok := h.Value.(*Header_PublicKey); return ok }
default:
return nil
}
}
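
SetHeader and LastHeader together give replace-or-append semantics keyed by header type, and HeaderType builds the matching predicate. A short usage sketch against the v1 API removed in this commit; the filename value is made up for illustration:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/object"
)

func main() {
    obj := new(object.Object)

    // SetHeader replaces an existing user header or appends a new one.
    obj.SetHeader(&object.Header{Value: &object.Header_UserHeader{
        UserHeader: &object.UserHeader{Key: object.FilenameHeader, Value: "report.txt"},
    }})

    // LastHeader finds the most recent header matching the type predicate.
    idx, h := obj.LastHeader(object.HeaderType(object.UserHdr))
    if h != nil {
        uh := h.GetValue().(*object.Header_UserHeader).UserHeader
        fmt.Println(idx, uh.Key, uh.Value) // 0 filename report.txt
    }
}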
// Copy creates full copy of the object.
func (m *Object) Copy() (obj *Object) {
obj = new(Object)
m.CopyTo(obj)
return
}
// CopyTo fills the passed object with the data from the current object.
// This function creates copies of every available data slice.
func (m *Object) CopyTo(o *Object) {
o.SystemHeader = m.SystemHeader
if m.Headers != nil {
o.Headers = make([]Header, len(m.Headers))
}
if m.Payload != nil {
o.Payload = make([]byte, len(m.Payload))
copy(o.Payload, m.Payload)
}
for i := range m.Headers {
switch v := m.Headers[i].Value.(type) {
case *Header_Link:
link := *v.Link
o.Headers[i] = Header{
Value: &Header_Link{
Link: &link,
},
}
case *Header_HomoHash:
hash := proto.Clone(&v.HomoHash).(*Hash)
o.Headers[i] = Header{
Value: &Header_HomoHash{
HomoHash: *hash,
},
}
case *Header_Token:
token := *v.Token
o.Headers[i] = Header{
Value: &Header_Token{
Token: &token,
},
}
default:
o.Headers[i] = *proto.Clone(&m.Headers[i]).(*Header)
}
}
}
// Address returns object's address.
func (m Object) Address() *refs.Address {
return &refs.Address{
ObjectID: m.SystemHeader.ID,
CID: m.SystemHeader.CID,
}
}
func (m CreationPoint) String() string {
return fmt.Sprintf(`{UnixTime=%d Epoch=%d}`, m.UnixTime, m.Epoch)
}
// Stringify converts object into string format.
func Stringify(dst io.Writer, obj *Object) error {
// put empty line
if _, err := fmt.Fprintln(dst); err != nil {
return err
}
// put object line
if _, err := fmt.Fprintln(dst, "Object:"); err != nil {
return err
}
// put system headers
if _, err := fmt.Fprintln(dst, "\tSystemHeader:"); err != nil {
return err
}
sysHeaders := []string{"ID", "CID", "OwnerID", "Version", "PayloadLength", "CreatedAt"}
v := reflect.ValueOf(obj.SystemHeader)
for _, key := range sysHeaders {
if !v.FieldByName(key).IsValid() {
return errors.Errorf("invalid system header key: %q", key)
}
val := v.FieldByName(key).Interface()
if _, err := fmt.Fprintf(dst, "\t\t- %s=%v\n", key, val); err != nil {
return err
}
}
// put user headers
if _, err := fmt.Fprintln(dst, "\tUserHeaders:"); err != nil {
return err
}
for _, header := range obj.Headers {
var (
typ = reflect.ValueOf(header.Value)
key string
val interface{}
)
switch t := typ.Interface().(type) {
case *Header_Link:
key = "Link"
val = fmt.Sprintf(`{Type=%s ID=%s}`, t.Link.Type, t.Link.ID)
case *Header_Redirect:
key = "Redirect"
val = fmt.Sprintf(`{CID=%s OID=%s}`, t.Redirect.CID, t.Redirect.ObjectID)
case *Header_UserHeader:
key = "UserHeader"
val = fmt.Sprintf(`{Key=%s Val=%s}`, t.UserHeader.Key, t.UserHeader.Value)
case *Header_Transform:
key = "Transform"
val = t.Transform.Type.String()
case *Header_Tombstone:
key = "Tombstone"
val = "MARKED"
case *Header_Token:
key = "Token"
val = fmt.Sprintf("{"+
"ID=%s OwnerID=%s Verb=%s Address=%s Created=%d ValidUntil=%d SessionKey=%02x Signature=%02x"+
"}",
t.Token.GetID(),
t.Token.GetOwnerID(),
t.Token.GetVerb(),
t.Token.GetAddress(),
t.Token.CreationEpoch(),
t.Token.ExpirationEpoch(),
t.Token.GetSessionKey(),
t.Token.GetSignature())
case *Header_HomoHash:
key = "HomoHash"
val = t.HomoHash
case *Header_PayloadChecksum:
key = "PayloadChecksum"
val = t.PayloadChecksum
case *Header_Integrity:
key = "Integrity"
val = fmt.Sprintf(`{Checksum=%02x Signature=%02x}`,
t.Integrity.HeadersChecksum,
t.Integrity.ChecksumSignature)
case *Header_StorageGroup:
key = "StorageGroup"
val = fmt.Sprintf(`{DataSize=%d Hash=%02x Lifetime={Unit=%s Value=%d}}`,
t.StorageGroup.ValidationDataSize,
t.StorageGroup.ValidationHash,
t.StorageGroup.Lifetime.Unit,
t.StorageGroup.Lifetime.Value)
case *Header_PublicKey:
key = "PublicKey"
val = t.PublicKey.Value
default:
key = "Unknown"
val = t
}
if _, err := fmt.Fprintf(dst, "\t\t- Type=%s\n\t\t Value=%v\n", key, val); err != nil {
return err
}
}
// put payload
if _, err := fmt.Fprintf(dst, "\tPayload: %#v\n", obj.Payload); err != nil {
return err
}
return nil
}
// SetFullHeaders is a FullHeaders field setter.
func (m *HeadRequest) SetFullHeaders(v bool) {
m.FullHeaders = v
}
// GetSignature is a ChecksumSignature field getter.
func (m IntegrityHeader) GetSignature() []byte {
return m.ChecksumSignature
}
// SetSignature is a ChecksumSignature field setter.
func (m *IntegrityHeader) SetSignature(v []byte) {
m.ChecksumSignature = v
}
// SetHeadersChecksum is a HeadersChecksum field setter.
func (m *IntegrityHeader) SetHeadersChecksum(v []byte) {
m.HeadersChecksum = v
}

File diff suppressed because it is too large

View file

@ -1,134 +0,0 @@
syntax = "proto3";
package object;
option go_package = "github.com/nspcc-dev/neofs-api-go/object";
option csharp_namespace = "NeoFS.API.Object";
import "refs/types.proto";
import "service/verify.proto";
import "storagegroup/types.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
message Range {
// Offset of the data range
uint64 Offset = 1;
// Length of the data range
uint64 Length = 2;
}
message UserHeader {
// Key of the user's header
string Key = 1;
// Value of the user's header
string Value = 2;
}
message Header {
oneof Value {
// Link to other objects
Link Link = 1;
// Redirect not used yet
refs.Address Redirect = 2;
// UserHeader is a set of KV headers defined by user
UserHeader UserHeader = 3;
// Transform defines transform operation (e.g. payload split)
Transform Transform = 4;
// Tombstone header that is set in deleted objects
Tombstone Tombstone = 5;
// Token header contains token of the session within which the object was created
service.Token Token = 6;
// HomoHash is a homomorphic hash of original object payload
bytes HomoHash = 7 [(gogoproto.customtype) = "Hash"];
// PayloadChecksum of actual object's payload
bytes PayloadChecksum = 8;
// Integrity header with checksum of all above headers in the object
IntegrityHeader Integrity = 9;
// StorageGroup contains meta information for the data audit
storagegroup.StorageGroup StorageGroup = 10;
// PublicKey of owner of the object. Key is used for verification and can be based on NeoID or x509 cert.
PublicKey PublicKey = 11;
}
}
message Tombstone {}
message SystemHeader {
// Version of the object structure
uint64 Version = 1;
// PayloadLength is an object payload length
uint64 PayloadLength = 2;
// ID is an object identifier; it is a valid UUIDv4
bytes ID = 3 [(gogoproto.customtype) = "ID", (gogoproto.nullable) = false];
// OwnerID is a wallet address
bytes OwnerID = 4 [(gogoproto.customtype) = "OwnerID", (gogoproto.nullable) = false];
// CID is a SHA256 hash of the container structure (container identifier)
bytes CID = 5 [(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
// CreatedAt is a timestamp of object creation
CreationPoint CreatedAt = 6 [(gogoproto.nullable) = false];
}
message CreationPoint {
option (gogoproto.goproto_stringer) = false;
// UnixTime is a date of creation in unixtime format
int64 UnixTime = 1;
// Epoch is a date of creation in NeoFS epochs
uint64 Epoch = 2;
}
message IntegrityHeader {
// HeadersChecksum is a checksum of all above headers in the object
bytes HeadersChecksum = 1;
// ChecksumSignature is the user's signature of the checksum, used to verify that it is correct
bytes ChecksumSignature = 2;
}
message Link {
enum Type {
Unknown = 0;
// Parent object created during object transformation
Parent = 1;
// Previous object in the linked list created during object transformation
Previous = 2;
// Next object in the linked list created during object transformation
Next = 3;
// Child object created during object transformation
Child = 4;
// Object that is included in this storage group
StorageGroup = 5;
}
// Type of link
Type type = 1;
// ID is an object identifier; it is a valid UUIDv4
bytes ID = 2 [(gogoproto.customtype) = "ID", (gogoproto.nullable) = false];
}
message Transform {
enum Type {
Unknown = 0;
// Split is set when the object is created after payload split
Split = 1;
// Sign is set when the object is created after re-signing (not used)
Sign = 2;
// Mould is set when the object is created after filling in missing headers in the object
Mould = 3;
}
// Type of object transformation
Type type = 1;
}
message Object {
// SystemHeader describes system header
SystemHeader SystemHeader = 1 [(gogoproto.nullable) = false];
// Headers describes a set of extended headers
repeated Header Headers = 2 [(gogoproto.nullable) = false];
// Payload is an object's payload
bytes Payload = 3;
}
message PublicKey {
// Value contains marshaled ecdsa public key
bytes Value = 1;
}

View file

@ -1,234 +0,0 @@
package object
import (
"bytes"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestStringify(t *testing.T) {
res := `
Object:
SystemHeader:
- ID=7e0b9c6c-aabc-4985-949e-2680e577b48b
- CID=11111111111111111111111111111111
- OwnerID=NQHKh7fKGieCPrPuiEkY58ucRFwWMyU1Mc
- Version=1
- PayloadLength=1
- CreatedAt={UnixTime=1 Epoch=1}
UserHeaders:
- Type=Link
Value={Type=Child ID=7e0b9c6c-aabc-4985-949e-2680e577b48b}
- Type=Redirect
Value={CID=11111111111111111111111111111111 OID=7e0b9c6c-aabc-4985-949e-2680e577b48b}
- Type=UserHeader
Value={Key=test_key Val=test_value}
- Type=Transform
Value=Split
- Type=Tombstone
Value=MARKED
- Type=Token
Value={ID=7e0b9c6c-aabc-4985-949e-2680e577b48b OwnerID=NQHKh7fKGieCPrPuiEkY58ucRFwWMyU1Mc Verb=Search Address=11111111111111111111111111111111/7e0b9c6c-aabc-4985-949e-2680e577b48b Created=1 ValidUntil=2 SessionKey=010203040506 Signature=010203040506}
- Type=HomoHash
Value=1111111111111111111111111111111111111111111111111111111111111111
- Type=PayloadChecksum
Value=[1 2 3 4 5 6]
- Type=Integrity
Value={Checksum=010203040506 Signature=010203040506}
- Type=StorageGroup
Value={DataSize=5 Hash=31313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131313131 Lifetime={Unit=UnixTime Value=555}}
- Type=PublicKey
Value=[1 2 3 4 5 6]
Payload: []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7}
`
key := test.DecodeKey(0)
uid, err := refs.NewOwnerID(&key.PublicKey)
require.NoError(t, err)
var oid refs.UUID
require.NoError(t, oid.Parse("7e0b9c6c-aabc-4985-949e-2680e577b48b"))
obj := &Object{
SystemHeader: SystemHeader{
Version: 1,
PayloadLength: 1,
ID: oid,
OwnerID: uid,
CID: CID{},
CreatedAt: CreationPoint{
UnixTime: 1,
Epoch: 1,
},
},
Payload: []byte{1, 2, 3, 4, 5, 6, 7},
}
// *Header_Link
obj.Headers = append(obj.Headers, Header{
Value: &Header_Link{
Link: &Link{ID: oid, Type: Link_Child},
},
})
// *Header_Redirect
obj.Headers = append(obj.Headers, Header{
Value: &Header_Redirect{
Redirect: &Address{ObjectID: oid, CID: CID{}},
},
})
// *Header_UserHeader
obj.Headers = append(obj.Headers, Header{
Value: &Header_UserHeader{
UserHeader: &UserHeader{
Key: "test_key",
Value: "test_value",
},
},
})
// *Header_Transform
obj.Headers = append(obj.Headers, Header{
Value: &Header_Transform{
Transform: &Transform{
Type: Transform_Split,
},
},
})
// *Header_Tombstone
obj.Headers = append(obj.Headers, Header{
Value: &Header_Tombstone{
Tombstone: &Tombstone{},
},
})
token := new(Token)
token.SetID(oid)
token.SetOwnerID(uid)
token.SetVerb(service.Token_Info_Search)
token.SetAddress(Address{ObjectID: oid, CID: refs.CID{}})
token.SetCreationEpoch(1)
token.SetExpirationEpoch(2)
token.SetSessionKey([]byte{1, 2, 3, 4, 5, 6})
token.SetSignature([]byte{1, 2, 3, 4, 5, 6})
// *Header_Token
obj.Headers = append(obj.Headers, Header{
Value: &Header_Token{
Token: token,
},
})
// *Header_HomoHash
obj.Headers = append(obj.Headers, Header{
Value: &Header_HomoHash{
HomoHash: Hash{},
},
})
// *Header_PayloadChecksum
obj.Headers = append(obj.Headers, Header{
Value: &Header_PayloadChecksum{
PayloadChecksum: []byte{1, 2, 3, 4, 5, 6},
},
})
// *Header_Integrity
obj.Headers = append(obj.Headers, Header{
Value: &Header_Integrity{
Integrity: &IntegrityHeader{
HeadersChecksum: []byte{1, 2, 3, 4, 5, 6},
ChecksumSignature: []byte{1, 2, 3, 4, 5, 6},
},
},
})
// *Header_StorageGroup
obj.Headers = append(obj.Headers, Header{
Value: &Header_StorageGroup{
StorageGroup: &storagegroup.StorageGroup{
ValidationDataSize: 5,
ValidationHash: storagegroup.Hash{},
Lifetime: &storagegroup.StorageGroup_Lifetime{
Unit: storagegroup.StorageGroup_Lifetime_UnixTime,
Value: 555,
},
},
},
})
// *Header_PublicKey
obj.Headers = append(obj.Headers, Header{
Value: &Header_PublicKey{
PublicKey: &PublicKey{Value: []byte{1, 2, 3, 4, 5, 6}},
},
})
buf := new(bytes.Buffer)
require.NoError(t, Stringify(buf, obj))
require.Equal(t, res, buf.String())
}
func TestObject_Copy(t *testing.T) {
t.Run("token header", func(t *testing.T) {
token := new(Token)
token.SetID(service.TokenID{1, 2, 3})
obj := new(Object)
obj.AddHeader(&Header{
Value: &Header_Token{
Token: token,
},
})
{ // Copying
cp := obj.Copy()
_, h := cp.LastHeader(HeaderType(TokenHdr))
require.NotNil(t, h)
require.Equal(t, token, h.GetValue().(*Header_Token).Token)
}
{ // Cloning
cl := proto.Clone(obj).(*Object)
require.Equal(t, obj, cl)
_, h := cl.LastHeader(HeaderType(TokenHdr))
h.GetToken().SetID(service.TokenID{3, 2, 1})
require.NotEqual(t, token, h.GetToken())
}
})
}
func TestIntegrityHeaderGettersSetters(t *testing.T) {
t.Run("headers checksum", func(t *testing.T) {
data := []byte{1, 2, 3}
v := new(IntegrityHeader)
v.SetHeadersChecksum(data)
require.Equal(t, data, v.GetHeadersChecksum())
})
t.Run("headers checksum", func(t *testing.T) {
data := []byte{1, 2, 3}
v := new(IntegrityHeader)
v.SetSignature(data)
require.Equal(t, data, v.GetSignature())
})
}

View file

@ -1,100 +0,0 @@
package object
import (
"io"
"strconv"
"github.com/pkg/errors"
)
// FilenameHeader is a user header key for names of files stored by third-party
// apps. We recommend using this header to stay compatible with the neofs
// http gate, the neofs minio gate and the neofs-dropper application.
const FilenameHeader = "filename"
// ByteSize is used to format byte counts in a human-readable form.
type ByteSize uint64
// String represents ByteSize in string format
func (b ByteSize) String() string {
var (
dec int64
unit string
num = int64(b)
)
switch {
case num > UnitsTB:
unit = "TB"
dec = UnitsTB
case num > UnitsGB:
unit = "GB"
dec = UnitsGB
case num > UnitsMB:
unit = "MB"
dec = UnitsMB
case num > UnitsKB:
unit = "KB"
dec = UnitsKB
default:
dec = 1
}
return strconv.FormatFloat(float64(num)/float64(dec), 'g', 6, 64) + unit
}
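
ByteSize picks the largest unit the value exceeds and formats with up to six significant digits. A quick usage sketch; the expected outputs are taken from the test cases later in this diff:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/object"
)

func main() {
    fmt.Println(object.ByteSize(101).String())      // "101"
    fmt.Println(object.ByteSize(84347453).String()) // "80.44MB"
}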
// MakePutRequestHeader combines object and session token value
// into header of object put request.
func MakePutRequestHeader(obj *Object) *PutRequest {
return &PutRequest{
R: &PutRequest_Header{Header: &PutRequest_PutHeader{
Object: obj,
}},
}
}
// MakePutRequestChunk wraps a single payload chunk into a put request that
// will be transferred in the protobuf stream.
func MakePutRequestChunk(chunk []byte) *PutRequest {
return &PutRequest{R: &PutRequest_Chunk{Chunk: chunk}}
}
func errMaxSizeExceeded(size uint64) error {
return errors.Errorf("object payload size exceed: %s", ByteSize(size).String())
}
// ReceiveGetResponse receives an object by chunks from the protobuf stream
// and combines them into a single get response structure.
func ReceiveGetResponse(c Service_GetClient, maxSize uint64) (*GetResponse, error) {
res, err := c.Recv()
if err == io.EOF {
return res, err
} else if err != nil {
return nil, err
}
obj := res.GetObject()
if obj == nil {
return nil, ErrHeaderExpected
}
if obj.SystemHeader.PayloadLength > maxSize {
return nil, errMaxSizeExceeded(maxSize)
}
if res.NotFull() {
payload := make([]byte, obj.SystemHeader.PayloadLength)
offset := copy(payload, obj.Payload)
var r *GetResponse
for r, err = c.Recv(); err == nil; r, err = c.Recv() {
offset += copy(payload[offset:], r.GetChunk())
}
if err != io.EOF {
return nil, err
}
obj.Payload = payload
}
return res, nil
}
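
Together, MakePutRequestHeader/MakePutRequestChunk and ReceiveGetResponse define the streaming convention: a put stream starts with one header request followed by payload chunks, and a get stream is drained back into a single response. A hedged sketch against the removed v1 API; the chunk size, payload and the 64 MiB cap are illustrative, and the Service_GetClient is assumed to come from an established gRPC connection elsewhere:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/object"
)

// buildPutStream builds the request sequence a v1 client sends on Put:
// one header request, then the payload split into fixed-size chunks.
func buildPutStream(obj *object.Object, chunkSize int) []*object.PutRequest {
    reqs := []*object.PutRequest{object.MakePutRequestHeader(obj)}
    for off := 0; off < len(obj.Payload); off += chunkSize {
        end := off + chunkSize
        if end > len(obj.Payload) {
            end = len(obj.Payload)
        }
        reqs = append(reqs, object.MakePutRequestChunk(obj.Payload[off:end]))
    }
    return reqs
}

// fetchObject drains a Get stream into a single response, refusing objects
// whose declared payload exceeds 64 MiB (an arbitrary cap for this sketch).
func fetchObject(cli object.Service_GetClient) (*object.GetResponse, error) {
    return object.ReceiveGetResponse(cli, 64<<20)
}

func main() {
    obj := new(object.Object)
    obj.SetPayload(make([]byte, 10))
    fmt.Println(len(buildPutStream(obj, 4))) // 1 header + 3 chunks = 4 requests
}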

View file

@ -1,53 +0,0 @@
package object
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestByteSize_String(t *testing.T) {
cases := []struct {
name string
expect string
actual ByteSize
}{
{
name: "0 bytes",
expect: "0",
actual: ByteSize(0),
},
{
name: "101 bytes",
expect: "101",
actual: ByteSize(101),
},
{
name: "112.84KB",
expect: "112.84KB",
actual: ByteSize(115548),
},
{
name: "80.44MB",
expect: "80.44MB",
actual: ByteSize(84347453),
},
{
name: "905.144GB",
expect: "905.144GB",
actual: ByteSize(971891061884),
},
{
name: "1.857TB",
expect: "1.857TB",
actual: ByteSize(2041793092780),
},
}
for i := range cases {
tt := cases[i]
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.expect, tt.actual.String())
})
}
}

View file

@ -1,149 +0,0 @@
package object
import (
"bytes"
"crypto/ecdsa"
"crypto/sha256"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/pkg/errors"
)
func (m Object) headersData(check bool) ([]byte, error) {
var bytebuf = new(bytes.Buffer)
// fixme: we must marshal fields one by one without protobuf marshaling
// protobuf marshaling does not guarantee the same result
if sysheader, err := m.SystemHeader.Marshal(); err != nil {
return nil, err
} else if _, err := bytebuf.Write(sysheader); err != nil {
return nil, err
}
n, _ := m.LastHeader(HeaderType(IntegrityHdr))
for i := range m.Headers {
if check && i == n {
// ignore last integrity header in order to check headers data
continue
}
if header, err := m.Headers[i].Marshal(); err != nil {
return nil, err
} else if _, err := bytebuf.Write(header); err != nil {
return nil, err
}
}
return bytebuf.Bytes(), nil
}
func (m Object) headersChecksum(check bool) ([]byte, error) {
data, err := m.headersData(check)
if err != nil {
return nil, err
}
checksum := sha256.Sum256(data)
return checksum[:], nil
}
// PayloadChecksum calculates sha256 checksum of object payload.
func (m Object) PayloadChecksum() []byte {
checksum := sha256.Sum256(m.Payload)
return checksum[:]
}
func (m Object) verifySignature(key []byte, ih *IntegrityHeader) error {
pk := crypto.UnmarshalPublicKey(key)
if crypto.Verify(pk, ih.HeadersChecksum, ih.ChecksumSignature) == nil {
return nil
}
return ErrVerifySignature
}
// Verify performs a local integrity check by finding the verification header and
// the integrity header. If the header integrity check passes, the function verifies
// the checksum of the object payload.
// todo: move this verification logic into separate library
func (m Object) Verify() error {
var (
err error
checksum []byte
pubkey []byte
)
ind, ih := m.LastHeader(HeaderType(IntegrityHdr))
if ih == nil || ind != len(m.Headers)-1 {
return ErrHeaderNotFound
}
integrity := ih.Value.(*Header_Integrity).Integrity
// Prepare structures
_, vh := m.LastHeader(HeaderType(TokenHdr))
if vh == nil {
_, pkh := m.LastHeader(HeaderType(PublicKeyHdr))
if pkh == nil {
return ErrHeaderNotFound
}
pubkey = pkh.Value.(*Header_PublicKey).PublicKey.Value
} else {
pubkey = vh.Value.(*Header_Token).Token.GetSessionKey()
}
// Verify signature
err = m.verifySignature(pubkey, integrity)
if err != nil {
return errors.Wrapf(err, "public key: %x", pubkey)
}
// Verify checksum of header
checksum, err = m.headersChecksum(true)
if err != nil {
return err
}
if !bytes.Equal(integrity.HeadersChecksum, checksum) {
return ErrVerifyHeader
}
// Verify checksum of payload
if m.SystemHeader.PayloadLength > 0 && !m.IsLinking() {
checksum = m.PayloadChecksum()
_, ph := m.LastHeader(HeaderType(PayloadChecksumHdr))
if ph == nil {
return ErrHeaderNotFound
}
if !bytes.Equal(ph.Value.(*Header_PayloadChecksum).PayloadChecksum, checksum) {
return ErrVerifyPayload
}
}
return nil
}
// CreateIntegrityHeader returns signed integrity header for the object
func CreateIntegrityHeader(obj *Object, key *ecdsa.PrivateKey) (*Header, error) {
headerChecksum, err := obj.headersChecksum(false)
if err != nil {
return nil, err
}
headerChecksumSignature, err := crypto.Sign(key, headerChecksum)
if err != nil {
return nil, err
}
return &Header{Value: &Header_Integrity{
Integrity: &IntegrityHeader{
HeadersChecksum: headerChecksum,
ChecksumSignature: headerChecksumSignature,
},
}}, nil
}
// Sign creates new integrity header and adds it to the end of the list of
// extended headers.
func (m *Object) Sign(key *ecdsa.PrivateKey) error {
ih, err := CreateIntegrityHeader(m, key)
if err != nil {
return err
}
m.AddHeader(ih)
return nil
}
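
Sign and Verify together implement the integrity scheme: the headers are hashed, the hash is signed, and the integrity header must stay the last extended header so Verify can skip it while recomputing the checksum. A minimal end-to-end sketch against the removed v1 API, using a freshly generated P-256 key and a PublicKey header instead of a session token; the payload bytes are arbitrary:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/object"
    crypto "github.com/nspcc-dev/neofs-crypto"
)

func main() {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }

    obj := new(object.Object)
    obj.SetPayload([]byte("hello"))

    // Verify checks the payload checksum header against sha256 of the payload,
    // so it has to be set before signing.
    obj.SetHeader(&object.Header{Value: &object.Header_PayloadChecksum{
        PayloadChecksum: obj.PayloadChecksum(),
    }})

    // Verify resolves the signer from a Token header or, failing that,
    // from a PublicKey header; here the public key is attached directly.
    obj.SetHeader(&object.Header{Value: &object.Header_PublicKey{
        PublicKey: &object.PublicKey{Value: crypto.MarshalPublicKey(&key.PublicKey)},
    }})

    // Sign appends the integrity header (headers checksum + its signature)
    // as the last extended header.
    if err := obj.Sign(key); err != nil {
        panic(err)
    }

    fmt.Println(obj.Verify()) // <nil> when integrity and payload checks pass
}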

View file

@ -1,144 +0,0 @@
package object
import (
"testing"
"github.com/google/uuid"
"github.com/nspcc-dev/neofs-api-go/container"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestObject_Verify(t *testing.T) {
key := test.DecodeKey(0)
sessionkey := test.DecodeKey(1)
payload := make([]byte, 1024*1024)
cnr, err := container.NewTestContainer()
require.NoError(t, err)
cid, err := cnr.ID()
require.NoError(t, err)
id, err := uuid.NewRandom()
uid := refs.UUID(id)
require.NoError(t, err)
obj := &Object{
SystemHeader: SystemHeader{
ID: uid,
CID: cid,
OwnerID: refs.OwnerID([refs.OwnerIDSize]byte{}),
},
Headers: []Header{
{
Value: &Header_UserHeader{
UserHeader: &UserHeader{
Key: "Profession",
Value: "Developer",
},
},
},
{
Value: &Header_UserHeader{
UserHeader: &UserHeader{
Key: "Language",
Value: "GO",
},
},
},
},
}
obj.SetPayload(payload)
obj.SetHeader(&Header{Value: &Header_PayloadChecksum{[]byte("incorrect checksum")}})
t.Run("error no integrity header and pubkey", func(t *testing.T) {
err = obj.Verify()
require.EqualError(t, err, ErrHeaderNotFound.Error())
})
badHeaderChecksum := []byte("incorrect checksum")
signature, err := crypto.Sign(sessionkey, badHeaderChecksum)
require.NoError(t, err)
ih := &IntegrityHeader{
HeadersChecksum: badHeaderChecksum,
ChecksumSignature: signature,
}
obj.SetHeader(&Header{Value: &Header_Integrity{ih}})
t.Run("error no validation header", func(t *testing.T) {
err = obj.Verify()
require.EqualError(t, err, ErrHeaderNotFound.Error())
})
dataPK := crypto.MarshalPublicKey(&sessionkey.PublicKey)
signature, err = crypto.Sign(key, dataPK)
tok := new(Token)
tok.SetSignature(signature)
tok.SetSessionKey(dataPK)
obj.AddHeader(&Header{Value: &Header_Token{Token: tok}})
// validation header is not last
t.Run("error validation header is not last", func(t *testing.T) {
err = obj.Verify()
require.EqualError(t, err, ErrHeaderNotFound.Error())
})
obj.Headers = obj.Headers[:len(obj.Headers)-2]
obj.AddHeader(&Header{Value: &Header_Token{Token: tok}})
obj.SetHeader(&Header{Value: &Header_Integrity{ih}})
t.Run("error invalid header checksum", func(t *testing.T) {
err = obj.Verify()
require.EqualError(t, err, ErrVerifyHeader.Error())
})
obj.Headers = obj.Headers[:len(obj.Headers)-1]
genIH, err := CreateIntegrityHeader(obj, sessionkey)
require.NoError(t, err)
obj.SetHeader(genIH)
t.Run("error invalid payload checksum", func(t *testing.T) {
err = obj.Verify()
require.EqualError(t, err, ErrVerifyPayload.Error())
})
obj.SetHeader(&Header{Value: &Header_PayloadChecksum{obj.PayloadChecksum()}})
obj.Headers = obj.Headers[:len(obj.Headers)-1]
genIH, err = CreateIntegrityHeader(obj, sessionkey)
require.NoError(t, err)
obj.SetHeader(genIH)
t.Run("correct with tok", func(t *testing.T) {
err = obj.Verify()
require.NoError(t, err)
})
pkh := Header{Value: &Header_PublicKey{&PublicKey{
Value: crypto.MarshalPublicKey(&key.PublicKey),
}}}
// replace tok with pkh
obj.Headers[len(obj.Headers)-2] = pkh
// re-sign object
obj.Sign(sessionkey)
t.Run("incorrect with bad public key", func(t *testing.T) {
err = obj.Verify()
require.Error(t, err)
})
obj.SetHeader(&Header{Value: &Header_PublicKey{&PublicKey{
Value: dataPK,
}}})
obj.Sign(sessionkey)
t.Run("correct with good public key", func(t *testing.T) {
err = obj.Verify()
require.NoError(t, err)
})
}

View file

@ -1,43 +0,0 @@
package query
import (
"strings"
"github.com/gogo/protobuf/proto"
)
var (
_ proto.Message = (*Query)(nil)
_ proto.Message = (*Filter)(nil)
)
// String returns string representation of Filter.
func (m Filter) String() string {
b := new(strings.Builder)
b.WriteString("<Filter '$" + m.Name + "' ")
switch m.Type {
case Filter_Exact:
b.WriteString("==")
case Filter_Regex:
b.WriteString("~=")
default:
b.WriteString("??")
}
b.WriteString(" '" + m.Value + "'>")
return b.String()
}
// String returns string representation of Query.
func (m Query) String() string {
b := new(strings.Builder)
b.WriteString("<Query [")
ln := len(m.Filters)
for i := 0; i < ln; i++ {
b.WriteString(m.Filters[i].String())
if ln-1 != i {
b.WriteByte(',')
}
}
b.WriteByte(']')
return b.String()
}
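
A small usage sketch of the query types removed here, building a query with one exact and one regex filter and printing it; the field names and values are illustrative:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/query"
)

func main() {
    q := query.Query{Filters: []query.Filter{
        {Type: query.Filter_Exact, Name: "filename", Value: "report.txt"},
        {Type: query.Filter_Regex, Name: "mime", Value: "^text/"},
    }}

    // Filter.String renders each filter as <Filter '$name' op 'value'>,
    // and Query.String joins the filters into one line.
    fmt.Println(q.String())
}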

View file

@ -1,641 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: query/types.proto
package query
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Filter_Type int32
const (
// Exact sets when value of filter should be equal to the header value
Filter_Exact Filter_Type = 0
// Regex sets when value of filter should match the header value by the regular expression
Filter_Regex Filter_Type = 1
)
var Filter_Type_name = map[int32]string{
0: "Exact",
1: "Regex",
}
var Filter_Type_value = map[string]int32{
"Exact": 0,
"Regex": 1,
}
func (x Filter_Type) String() string {
return proto.EnumName(Filter_Type_name, int32(x))
}
func (Filter_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_c682aeaf51d46f4d, []int{0, 0}
}
type Filter struct {
// Type of filter
Type Filter_Type `protobuf:"varint,1,opt,name=type,proto3,enum=query.Filter_Type" json:"type,omitempty"`
// Name of field that should be filtered
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
// Value that should be used for filter
Value string `protobuf:"bytes,3,opt,name=Value,proto3" json:"Value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) {
return fileDescriptor_c682aeaf51d46f4d, []int{0}
}
func (m *Filter) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Filter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Filter.Merge(m, src)
}
func (m *Filter) XXX_Size() int {
return m.Size()
}
func (m *Filter) XXX_DiscardUnknown() {
xxx_messageInfo_Filter.DiscardUnknown(m)
}
var xxx_messageInfo_Filter proto.InternalMessageInfo
func (m *Filter) GetType() Filter_Type {
if m != nil {
return m.Type
}
return Filter_Exact
}
func (m *Filter) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Filter) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type Query struct {
// Filters is set of filters, should not be empty
Filters []Filter `protobuf:"bytes,1,rep,name=Filters,proto3" json:"Filters"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Query) Reset() { *m = Query{} }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_c682aeaf51d46f4d, []int{1}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Query) XXX_Merge(src proto.Message) {
xxx_messageInfo_Query.Merge(m, src)
}
func (m *Query) XXX_Size() int {
return m.Size()
}
func (m *Query) XXX_DiscardUnknown() {
xxx_messageInfo_Query.DiscardUnknown(m)
}
var xxx_messageInfo_Query proto.InternalMessageInfo
func (m *Query) GetFilters() []Filter {
if m != nil {
return m.Filters
}
return nil
}
func init() {
proto.RegisterEnum("query.Filter_Type", Filter_Type_name, Filter_Type_value)
proto.RegisterType((*Filter)(nil), "query.Filter")
proto.RegisterType((*Query)(nil), "query.Query")
}
func init() { proto.RegisterFile("query/types.proto", fileDescriptor_c682aeaf51d46f4d) }
var fileDescriptor_c682aeaf51d46f4d = []byte{
// 296 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x2c, 0x4d, 0x2d,
0xaa, 0xd4, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05,
0x0b, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7,
0xa7, 0xe7, 0xeb, 0x83, 0x65, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0xe8, 0x52,
0xea, 0x60, 0xe4, 0x62, 0x73, 0xcb, 0xcc, 0x29, 0x49, 0x2d, 0x12, 0x32, 0xe0, 0x62, 0x01, 0x99,
0x27, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x67, 0x24, 0xa4, 0x07, 0x36, 0x4f, 0x0f, 0x22, 0xa9, 0x17,
0x52, 0x59, 0x90, 0xea, 0xc4, 0xf1, 0xe8, 0x9e, 0x3c, 0x0b, 0x88, 0x15, 0x04, 0x56, 0x29, 0x24,
0xc4, 0xc5, 0xe2, 0x97, 0x98, 0x9b, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b,
0x89, 0x70, 0xb1, 0x86, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x83, 0x05, 0x21, 0x1c, 0x25, 0x19,
0x2e, 0xb0, 0x3e, 0x21, 0x4e, 0x2e, 0x56, 0xd7, 0x8a, 0xc4, 0xe4, 0x12, 0x01, 0x06, 0x10, 0x33,
0x28, 0x35, 0x3d, 0xb5, 0x42, 0x80, 0xd1, 0x8a, 0x65, 0xc6, 0x02, 0x79, 0x06, 0x25, 0x1b, 0x2e,
0xd6, 0x40, 0x90, 0x95, 0x42, 0xba, 0x5c, 0xec, 0x10, 0x5b, 0x8b, 0x25, 0x18, 0x15, 0x98, 0x35,
0xb8, 0x8d, 0x78, 0x51, 0xdc, 0xe2, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x4c, 0x0d, 0x44,
0xb7, 0x93, 0xff, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0xde, 0x78, 0x24, 0xc7, 0xf8,
0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x51, 0xea, 0x48, 0xa1, 0x91, 0x57, 0x5c, 0x90,
0x9c, 0xac, 0x9b, 0x92, 0x5a, 0xa6, 0x9f, 0x97, 0x9a, 0x9f, 0x56, 0xac, 0x9b, 0x58, 0x90, 0xa9,
0x9b, 0x9e, 0xaf, 0x0f, 0x36, 0x7a, 0x15, 0x13, 0xbf, 0x5f, 0x6a, 0xbe, 0x5b, 0xb0, 0x9e, 0x63,
0x80, 0xa7, 0x1e, 0xd8, 0x15, 0x49, 0x6c, 0xe0, 0x00, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff,
0x83, 0xc6, 0x2b, 0x05, 0x6b, 0x01, 0x00, 0x00,
}
func (m *Filter) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Filter) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Filter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Value) > 0 {
i -= len(m.Value)
copy(dAtA[i:], m.Value)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Value)))
i--
dAtA[i] = 0x1a
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
if m.Type != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Query) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Query) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Query) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Filters) > 0 {
for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Filter) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Type != 0 {
n += 1 + sovTypes(uint64(m.Type))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Query) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Filters) > 0 {
for _, e := range m.Filters {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTypes(x uint64) (n int) {
return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Filter) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Filter: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= Filter_Type(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Query) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Query: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Filters = append(m.Filters, Filter{})
if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTypes
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTypes
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTypes
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
)

View file

@ -1,33 +0,0 @@
syntax = "proto3";
package query;
option go_package = "github.com/nspcc-dev/neofs-api-go/query";
option csharp_namespace = "NeoFS.API.Query";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
message Filter {
option (gogoproto.goproto_stringer) = false;
enum Type {
// Exact sets when value of filter should be equal to the header value
Exact = 0;
// Regex sets when value of filter should match the header value by the regular expression
Regex = 1;
}
// Type of filter
Type type = 1 [(gogoproto.customname) = "Type"];
// Name of field that should be filtered
string Name = 2;
// Value that should be used for filter
string Value = 3;
}
message Query {
option (gogoproto.goproto_stringer) = false;
// Filters is set of filters, should not be empty
repeated Filter Filters = 1 [(gogoproto.nullable) = false];
}

View file

@ -1,80 +0,0 @@
package refs
import (
"crypto/sha256"
"strings"
"github.com/gogo/protobuf/proto"
"github.com/nspcc-dev/neofs-api-go/internal"
)
const (
joinSeparator = "/"
// ErrWrongAddress is raised when a wrong address is passed to Address.Parse or ParseAddress.
ErrWrongAddress = internal.Error("wrong address")
// ErrEmptyAddress is raised when an empty address is passed to Address.Parse or ParseAddress.
ErrEmptyAddress = internal.Error("empty address")
)
// ParseAddress parses address from string representation into new Address.
func ParseAddress(str string) (*Address, error) {
var addr Address
return &addr, addr.Parse(str)
}
// Parse parses address from string representation into current Address.
func (m *Address) Parse(addr string) error {
if m == nil {
return ErrEmptyAddress
}
items := strings.Split(addr, joinSeparator)
if len(items) != 2 {
return ErrWrongAddress
}
if err := m.CID.Parse(items[0]); err != nil {
return err
} else if err := m.ObjectID.Parse(items[1]); err != nil {
return err
}
return nil
}
// String returns string representation of Address.
func (m Address) String() string {
return strings.Join([]string{m.CID.String(), m.ObjectID.String()}, joinSeparator)
}
// IsFull checks that both the ContainerID and the ObjectID are not empty.
func (m Address) IsFull() bool {
return !m.CID.Empty() && !m.ObjectID.Empty()
}
// Equal checks that current Address is equal to passed Address.
func (m Address) Equal(a2 *Address) bool {
return m.CID.Equal(a2.CID) && m.ObjectID.Equal(a2.ObjectID)
}
// Hash returns a []byte that is used as a key for a storage bucket.
func (m Address) Hash() ([]byte, error) {
if !m.IsFull() {
return nil, ErrEmptyAddress
}
h := sha256.Sum256(append(m.ObjectID.Bytes(), m.CID.Bytes()...))
return h[:], nil
}
// Merge used by proto.Clone
func (m *Address) Merge(src proto.Message) {
if addr, ok := src.(*Address); ok {
cid := proto.Clone(&addr.CID).(*CID)
oid := proto.Clone(&addr.ObjectID).(*ObjectID)
m.CID = *cid
m.ObjectID = *oid
}
}
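
An address is simply "CID/ObjectID". The sketch below, written against the removed v1 refs API, builds one from a derived container ID and a fresh object ID, prints it and takes its storage-bucket hash; the example container bytes are arbitrary:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
    cid := refs.CIDForBytes([]byte("example container"))

    oid, err := refs.NewObjectID()
    if err != nil {
        panic(err)
    }

    addr := refs.Address{CID: cid, ObjectID: oid}
    fmt.Println(addr.String()) // "<base58 CID>/<UUID>"

    // Hash is only defined for a full (CID + ObjectID) address.
    key, err := addr.Hash()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", key)
}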

View file

@ -1,104 +0,0 @@
package refs
import (
"bytes"
"crypto/sha256"
"github.com/gogo/protobuf/proto"
"github.com/mr-tron/base58"
"github.com/pkg/errors"
)
// CIDForBytes creates CID for passed bytes.
func CIDForBytes(data []byte) CID { return sha256.Sum256(data) }
// CIDFromBytes parses CID from passed bytes.
func CIDFromBytes(data []byte) (cid CID, err error) {
if ln := len(data); ln != CIDSize {
return CID{}, errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", CIDSize, ln)
}
copy(cid[:], data)
return
}
// CIDFromString parses CID from string representation of CID.
func CIDFromString(c string) (CID, error) {
var cid CID
decoded, err := base58.Decode(c)
if err != nil {
return cid, err
}
return CIDFromBytes(decoded)
}
// Size returns size of CID (CIDSize).
func (c CID) Size() int { return CIDSize }
// Parse tries to parse CID from string representation.
func (c *CID) Parse(cid string) error {
var err error
if *c, err = CIDFromString(cid); err != nil {
return err
}
return nil
}
// Empty checks that current CID is empty.
func (c CID) Empty() bool { return bytes.Equal(c.Bytes(), emptyCID) }
// Equal checks that current CID is equal to passed CID.
func (c CID) Equal(cid CID) bool { return bytes.Equal(c.Bytes(), cid.Bytes()) }
// Marshal returns CID bytes representation.
func (c CID) Marshal() ([]byte, error) { return c.Bytes(), nil }
// MarshalBinary returns CID bytes representation.
func (c CID) MarshalBinary() ([]byte, error) { return c.Bytes(), nil }
// MarshalTo copies the CID bytes representation into the passed slice.
func (c *CID) MarshalTo(data []byte) (int, error) { return copy(data, c.Bytes()), nil }
// ProtoMessage method to satisfy proto.Message interface.
func (c CID) ProtoMessage() {}
// String returns string representation of CID.
func (c CID) String() string { return base58.Encode(c[:]) }
// Reset resets current CID to zero value.
func (c *CID) Reset() { *c = CID{} }
// Bytes returns CID bytes representation.
func (c CID) Bytes() []byte {
buf := make([]byte, CIDSize)
copy(buf, c[:])
return buf
}
// UnmarshalBinary tries to parse bytes representation of CID.
func (c *CID) UnmarshalBinary(data []byte) error { return c.Unmarshal(data) }
// Unmarshal tries to parse bytes representation of CID.
func (c *CID) Unmarshal(data []byte) error {
if ln := len(data); ln != CIDSize {
return errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", CIDSize, ln)
}
copy((*c)[:], data)
return nil
}
// Verify validates that the current CID was generated for the passed data.
func (c CID) Verify(data []byte) error {
if id := CIDForBytes(data); !bytes.Equal(c[:], id[:]) {
return errors.New("wrong hash for data")
}
return nil
}
// Merge used by proto.Clone
func (c *CID) Merge(src proto.Message) {
if cid, ok := src.(*CID); ok {
*c = *cid
}
}
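
A container ID is the sha256 of the container bytes. The sketch below derives one, verifies it against the data and round-trips it through its base58 string form; the data is arbitrary:

package main

import (
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
    data := []byte("marshaled container")

    cid := refs.CIDForBytes(data)
    if err := cid.Verify(data); err != nil {
        panic(err)
    }

    back, err := refs.CIDFromString(cid.String())
    if err != nil {
        panic(err)
    }
    fmt.Println(cid.Equal(back)) // true
}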

View file

@ -1,77 +0,0 @@
package refs
import (
"bytes"
"crypto/ecdsa"
"github.com/gogo/protobuf/proto"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neofs-api-go/chain"
"github.com/pkg/errors"
)
// NewOwnerID returns generated OwnerID from passed public key.
func NewOwnerID(key *ecdsa.PublicKey) (owner OwnerID, err error) {
if key == nil {
return
}
var d []byte
d, err = base58.Decode(chain.KeyToAddress(key))
if err != nil {
return
}
copy(owner[:], d)
return
}
// Size returns OwnerID size in bytes (OwnerIDSize).
func (OwnerID) Size() int { return OwnerIDSize }
// Empty checks that current OwnerID is empty value.
func (o OwnerID) Empty() bool { return bytes.Equal(o.Bytes(), emptyOwner) }
// Equal checks that current OwnerID is equal to passed OwnerID.
func (o OwnerID) Equal(id OwnerID) bool { return bytes.Equal(o.Bytes(), id.Bytes()) }
// Reset sets current OwnerID to empty value.
func (o *OwnerID) Reset() { *o = OwnerID{} }
// ProtoMessage method to satisfy proto.Message interface.
func (OwnerID) ProtoMessage() {}
// Marshal returns OwnerID bytes representation.
func (o OwnerID) Marshal() ([]byte, error) { return o.Bytes(), nil }
// MarshalTo copies OwnerID bytes representation into passed slice of bytes.
func (o OwnerID) MarshalTo(data []byte) (int, error) { return copy(data, o.Bytes()), nil }
// String returns string representation of OwnerID.
func (o OwnerID) String() string { return base58.Encode(o[:]) }
// Bytes returns OwnerID bytes representation.
func (o OwnerID) Bytes() []byte {
buf := make([]byte, OwnerIDSize)
copy(buf, o[:])
return buf
}
// Unmarshal tries to parse OwnerID bytes representation into current OwnerID.
func (o *OwnerID) Unmarshal(data []byte) error {
if ln := len(data); ln != OwnerIDSize {
return errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", OwnerIDSize, ln)
}
copy((*o)[:], data)
return nil
}
// Merge used by proto.Clone
func (o *OwnerID) Merge(src proto.Message) {
if uid, ok := src.(*OwnerID); ok {
*o = *uid
}
}
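
NewOwnerID derives the owner identifier from the NEO address of an ECDSA public key. A sketch with a freshly generated P-256 key (the curve NEO uses):

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }

    owner, err := refs.NewOwnerID(&key.PublicKey)
    if err != nil {
        panic(err)
    }

    // The string form is the base58-encoded NEO wallet address.
    fmt.Println(owner.String(), owner.Empty()) // <address> false
}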

View file

@ -1,14 +0,0 @@
package refs
import (
"github.com/pkg/errors"
)
// SGIDFromBytes parses the bytes representation of an SGID into a new SGID value.
func SGIDFromBytes(data []byte) (sgid SGID, err error) {
if ln := len(data); ln != SGIDSize {
return SGID{}, errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", SGIDSize, ln)
}
copy(sgid[:], data)
return
}

View file

@ -1,123 +0,0 @@
// This package contains basic structures implemented in Go, such as
//
// CID - container id
// OwnerID - owner id
// ObjectID - object id
// SGID - storage group id
// Address - contains object id and container id
// UUID - a 128 bit (16 byte) Universal Unique Identifier as defined in RFC 4122
package refs
import (
"crypto/sha256"
"github.com/google/uuid"
"github.com/nspcc-dev/neofs-api-go/chain"
"github.com/nspcc-dev/neofs-api-go/internal"
)
type (
// CID is implementation of ContainerID.
CID [CIDSize]byte
// UUID wrapper over github.com/google/uuid.UUID.
UUID uuid.UUID
// SGID is type alias of UUID.
SGID = UUID
// ObjectID is type alias of UUID.
ObjectID = UUID
// MessageID is type alias of UUID.
MessageID = UUID
// OwnerID is wrapper over neofs-proto/chain.WalletAddress.
OwnerID chain.WalletAddress
)
// OwnerIDSource is an interface of the container of an OwnerID value with read access.
type OwnerIDSource interface {
GetOwnerID() OwnerID
}
// OwnerIDContainer is an interface of the container of an OwnerID value.
type OwnerIDContainer interface {
OwnerIDSource
SetOwnerID(OwnerID)
}
// AddressContainer is an interface of the container of object address value.
type AddressContainer interface {
GetAddress() Address
SetAddress(Address)
}
const (
// UUIDSize contains size of UUID.
UUIDSize = 16
// SGIDSize contains size of SGID.
SGIDSize = UUIDSize
// CIDSize contains size of CID.
CIDSize = sha256.Size
// OwnerIDSize contains size of OwnerID.
OwnerIDSize = chain.AddressLength
// ErrWrongDataSize is raised when bytes passed into Unmarshal have the wrong size.
ErrWrongDataSize = internal.Error("wrong data size")
// ErrEmptyOwner is raised when empty OwnerID is passed into container.New.
ErrEmptyOwner = internal.Error("owner cant be empty")
// ErrEmptyCapacity is raised when empty Capacity is passed into container.New.
ErrEmptyCapacity = internal.Error("capacity cant be empty")
// ErrEmptyContainer is raised when the CID method is called for an empty container.
ErrEmptyContainer = internal.Error("cannot return ID for empty container")
)
var (
emptyCID = (CID{}).Bytes()
emptyUUID = (UUID{}).Bytes()
emptyOwner = (OwnerID{}).Bytes()
_ internal.Custom = (*CID)(nil)
_ internal.Custom = (*SGID)(nil)
_ internal.Custom = (*UUID)(nil)
_ internal.Custom = (*OwnerID)(nil)
_ internal.Custom = (*ObjectID)(nil)
_ internal.Custom = (*MessageID)(nil)
// NewSGID method alias.
NewSGID = NewUUID
// NewObjectID method alias.
NewObjectID = NewUUID
// NewMessageID method alias.
NewMessageID = NewUUID
)
// NewUUID returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewUUID() (UUID, error) {
id, err := uuid.NewRandom()
if err != nil {
return UUID{}, err
}
return UUID(id), nil
}
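A quick sketch of the identifier constructors declared above; since ObjectID, SGID and MessageID are aliases of UUID, the constructors behave identically (the main scaffolding and printed values are illustrative only):
package main

import (
	"fmt"

	"github.com/nspcc-dev/neofs-api-go/refs"
)

func main() {
	// NewObjectID is an alias of NewUUID (random, version 4).
	oid, err := refs.NewObjectID()
	if err != nil {
		panic(err)
	}
	sgid, err := refs.NewSGID()
	if err != nil {
		panic(err)
	}
	// String gives the RFC 4122 textual form; a fresh random UUID is never empty.
	fmt.Println(oid.String(), sgid.Empty()) // <uuid string> false
}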

View file

@@ -1,372 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: refs/types.proto
package refs
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Address of object (container id + object id)
type Address struct {
// ObjectID is an object identifier, valid UUIDv4 represented in bytes
ObjectID ObjectID `protobuf:"bytes,1,opt,name=ObjectID,proto3,customtype=ObjectID" json:"ObjectID"`
// CID is container identifier
CID CID `protobuf:"bytes,2,opt,name=CID,proto3,customtype=CID" json:"CID"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Address) Reset() { *m = Address{} }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) {
return fileDescriptor_063a64a96d952d31, []int{0}
}
func (m *Address) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Address) XXX_Merge(src proto.Message) {
xxx_messageInfo_Address.Merge(m, src)
}
func (m *Address) XXX_Size() int {
return m.Size()
}
func (m *Address) XXX_DiscardUnknown() {
xxx_messageInfo_Address.DiscardUnknown(m)
}
var xxx_messageInfo_Address proto.InternalMessageInfo
func init() {
proto.RegisterType((*Address)(nil), "refs.Address")
}
func init() { proto.RegisterFile("refs/types.proto", fileDescriptor_063a64a96d952d31) }
var fileDescriptor_063a64a96d952d31 = []byte{
// 221 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x4a, 0x4d, 0x2b,
0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x01, 0x89,
0x48, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7,
0xe7, 0xeb, 0x83, 0x25, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68, 0x52, 0x0a,
0xe3, 0x62, 0x77, 0x4c, 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x16, 0xd2, 0xe1, 0xe2, 0xf0, 0x4f, 0xca,
0x4a, 0x4d, 0x2e, 0xf1, 0x74, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x71, 0x12, 0x38, 0x71, 0x4f,
0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0xb8, 0x78, 0x10, 0x9c, 0x25, 0x24, 0xcb, 0xc5, 0xec, 0xec, 0xe9,
0x22, 0xc1, 0x04, 0x56, 0xc8, 0x0d, 0x55, 0x08, 0x12, 0x0a, 0x02, 0x11, 0x4e, 0xa1, 0x37, 0x1e,
0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x1b,
0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0x0d, 0xc9, 0x91,
0x79, 0xc5, 0x05, 0xc9, 0xc9, 0xba, 0x29, 0xa9, 0x65, 0xfa, 0x79, 0xa9, 0xf9, 0x69, 0xc5, 0xba,
0x89, 0x05, 0x99, 0xba, 0xe9, 0xf9, 0xfa, 0x20, 0xcf, 0xac, 0x62, 0xe2, 0xf3, 0x4b, 0xcd, 0x77,
0x0b, 0xd6, 0x73, 0x0c, 0xf0, 0xd4, 0x0b, 0x4a, 0x4d, 0x2b, 0x4e, 0x62, 0x03, 0xbb, 0xda, 0x18,
0x10, 0x00, 0x00, 0xff, 0xff, 0xae, 0x9a, 0x6d, 0x68, 0xfe, 0x00, 0x00, 0x00,
}
func (m *Address) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Address) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Address) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
{
size := m.CID.Size()
i -= size
if _, err := m.CID.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.ObjectID.Size()
i -= size
if _, err := m.ObjectID.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Address) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectID.Size()
n += 1 + l + sovTypes(uint64(l))
l = m.CID.Size()
n += 1 + l + sovTypes(uint64(l))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTypes(x uint64) (n int) {
return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Address) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Address: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Address: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.CID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTypes
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTypes
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTypes
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
)

View file

@@ -1,19 +0,0 @@
syntax = "proto3";
package refs;
option go_package = "github.com/nspcc-dev/neofs-api-go/refs";
option csharp_namespace = "NeoFS.API.Refs";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
option (gogoproto.stringer_all) = false;
option (gogoproto.goproto_stringer_all) = false;
// Address of object (container id + object id)
message Address {
// ObjectID is an object identifier, valid UUIDv4 represented in bytes
bytes ObjectID = 1[(gogoproto.customtype) = "ObjectID", (gogoproto.nullable) = false];
// CID is container identifier
bytes CID = 2[(gogoproto.customtype) = "CID", (gogoproto.nullable) = false];
}

View file

@@ -1,141 +0,0 @@
package refs
import (
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/google/uuid"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestSGID(t *testing.T) {
t.Run("check that marshal/unmarshal works like expected", func(t *testing.T) {
var sgid1, sgid2 UUID
sgid1, err := NewSGID()
require.NoError(t, err)
data, err := proto.Marshal(&sgid1)
require.NoError(t, err)
require.NoError(t, sgid2.Unmarshal(data))
require.Equal(t, sgid1, sgid2)
})
t.Run("check that proto.Clone works like expected", func(t *testing.T) {
var (
sgid1 UUID
sgid2 *UUID
)
sgid1, err := NewSGID()
require.NoError(t, err)
sgid2 = proto.Clone(&sgid1).(*SGID)
require.Equal(t, sgid1, *sgid2)
})
}
func TestUUID(t *testing.T) {
t.Run("parse should work like expected", func(t *testing.T) {
var u UUID
id, err := uuid.NewRandom()
require.NoError(t, err)
require.NoError(t, u.Parse(id.String()))
require.Equal(t, id.String(), u.String())
})
t.Run("check that marshal/unmarshal works like expected", func(t *testing.T) {
var u1, u2 UUID
u1 = UUID{0x8f, 0xe4, 0xeb, 0xa0, 0xb8, 0xfb, 0x49, 0x3b, 0xbb, 0x1d, 0x1d, 0x13, 0x6e, 0x69, 0xfc, 0xf7}
data, err := proto.Marshal(&u1)
require.NoError(t, err)
require.NoError(t, u2.Unmarshal(data))
require.Equal(t, u1, u2)
})
t.Run("check that marshal/unmarshal works like expected even for msg id", func(t *testing.T) {
var u2 MessageID
u1, err := NewMessageID()
require.NoError(t, err)
data, err := proto.Marshal(&u1)
require.NoError(t, err)
require.NoError(t, u2.Unmarshal(data))
require.Equal(t, u1, u2)
})
}
func TestOwnerID(t *testing.T) {
t.Run("check that marshal/unmarshal works like expected", func(t *testing.T) {
var u1, u2 OwnerID
owner, err := NewOwnerID(nil)
require.NoError(t, err)
require.True(t, owner.Empty())
key := test.DecodeKey(0)
u1, err = NewOwnerID(&key.PublicKey)
require.NoError(t, err)
data, err := proto.Marshal(&u1)
require.NoError(t, err)
require.NoError(t, u2.Unmarshal(data))
require.Equal(t, u1, u2)
})
t.Run("check that proto.Clone works like expected", func(t *testing.T) {
var u2 *OwnerID
key := test.DecodeKey(0)
u1, err := NewOwnerID(&key.PublicKey)
require.NoError(t, err)
u2 = proto.Clone(&u1).(*OwnerID)
require.Equal(t, u1, *u2)
})
}
func TestAddress(t *testing.T) {
cid := CIDForBytes([]byte("test"))
id, err := NewObjectID()
require.NoError(t, err)
expect := strings.Join([]string{
cid.String(),
id.String(),
}, joinSeparator)
require.NotPanics(t, func() {
actual := (Address{
ObjectID: id,
CID: cid,
}).String()
require.Equal(t, expect, actual)
})
var temp Address
require.NoError(t, temp.Parse(expect))
require.Equal(t, expect, temp.String())
actual, err := ParseAddress(expect)
require.NoError(t, err)
require.Equal(t, expect, actual.String())
addr := proto.Clone(actual).(*Address)
require.Equal(t, actual, addr)
require.Equal(t, expect, addr.String())
}

View file

@@ -1,84 +0,0 @@
package refs
import (
"bytes"
"encoding/hex"
"github.com/gogo/protobuf/proto"
"github.com/google/uuid"
"github.com/pkg/errors"
)
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Size returns size in bytes of UUID (UUIDSize).
func (UUID) Size() int { return UUIDSize }
// Empty checks that current UUID is empty value.
func (u UUID) Empty() bool { return bytes.Equal(u.Bytes(), emptyUUID) }
// Reset sets current UUID to empty value.
func (u *UUID) Reset() { *u = [UUIDSize]byte{} }
// ProtoMessage method to satisfy proto.Message.
func (UUID) ProtoMessage() {}
// Marshal returns UUID bytes representation.
func (u UUID) Marshal() ([]byte, error) { return u.Bytes(), nil }
// MarshalTo copies UUID bytes representation into passed slice of bytes.
func (u UUID) MarshalTo(data []byte) (int, error) { return copy(data, u[:]), nil }
// Bytes returns UUID bytes representation.
func (u UUID) Bytes() []byte {
buf := make([]byte, UUIDSize)
copy(buf, u[:])
return buf
}
// Equal checks that current UUID is equal to passed UUID.
func (u UUID) Equal(u2 UUID) bool { return bytes.Equal(u.Bytes(), u2.Bytes()) }
// String returns string representation of UUID.
func (u UUID) String() string {
var buf [36]byte
encodeHex(buf[:], u)
return string(buf[:])
}
// Unmarshal tries to parse UUID bytes representation.
func (u *UUID) Unmarshal(data []byte) error {
if ln := len(data); ln != UUIDSize {
return errors.Wrapf(ErrWrongDataSize, "expect=%d, actual=%d", UUIDSize, ln)
}
copy((*u)[:], data)
return nil
}
// Parse tries to parse UUID string representation.
func (u *UUID) Parse(id string) error {
tmp, err := uuid.Parse(id)
if err != nil {
return errors.Wrapf(err, "could not parse `%s`", id)
}
copy((*u)[:], tmp[:])
return nil
}
// Merge is used by proto.Clone.
func (u *UUID) Merge(src proto.Message) {
if tmp, ok := src.(*UUID); ok {
*u = *tmp
}
}

View file

@@ -1,20 +0,0 @@
package service
import (
"github.com/nspcc-dev/neofs-api-go/refs"
)
// TokenID is a type alias of UUID ref.
type TokenID = refs.UUID
// OwnerID is a type alias of OwnerID ref.
type OwnerID = refs.OwnerID
// Address is a type alias of Address ref.
type Address = refs.Address
// AddressContainer is a type alias of refs.AddressContainer.
type AddressContainer = refs.AddressContainer
// OwnerIDContainer is a type alias of refs.OwnerIDContainer.
type OwnerIDContainer = refs.OwnerIDContainer

View file

@@ -1,140 +0,0 @@
package service
import (
"crypto/ecdsa"
"io"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
)
type signedBearerToken struct {
BearerToken
}
type bearerMsgWrapper struct {
*BearerTokenMsg
}
const fixedBearerTokenDataSize = 0 +
refs.OwnerIDSize +
8
// NewSignedBearerToken wraps passed BearerToken in a component suitable for signing.
//
// Result can be used in AddSignatureWithKey function.
func NewSignedBearerToken(token BearerToken) DataWithSignKeyAccumulator {
return &signedBearerToken{
BearerToken: token,
}
}
// NewVerifiedBearerToken wraps passed BearerToken in a component suitable for signature verification.
//
// Result can be used in VerifySignatureWithKey function.
func NewVerifiedBearerToken(token BearerToken) DataWithSignature {
return &signedBearerToken{
BearerToken: token,
}
}
// AddSignKey calls a Signature field setter and an OwnerKey field setter with corresponding arguments.
func (s signedBearerToken) AddSignKey(sig []byte, key *ecdsa.PublicKey) {
if s.BearerToken != nil {
s.SetSignature(sig)
s.SetOwnerKey(
crypto.MarshalPublicKey(key),
)
}
}
// SignedData returns token information in a binary representation.
func (s signedBearerToken) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the length of signed token information slice.
func (s signedBearerToken) SignedDataSize() int {
return bearerTokenInfoSize(s.BearerToken)
}
// ReadSignedData copies a binary representation of the token information to passed buffer.
//
// If buffer length is less than required, io.ErrUnexpectedEOF returns.
func (s signedBearerToken) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
copyBearerTokenSignedData(p, s.BearerToken)
return sz, nil
}
func bearerTokenInfoSize(v ACLRulesSource) int {
if v == nil {
return 0
}
return fixedBearerTokenDataSize + len(v.GetACLRules())
}
// Fills passed buffer with signing token information bytes.
// Does not check buffer length, it is understood that enough space is allocated in it.
//
// If passed BearerTokenInfo is nil, buffer remains unchanged.
func copyBearerTokenSignedData(buf []byte, token BearerTokenInfo) {
if token == nil {
return
}
var off int
off += copy(buf[off:], token.GetACLRules())
off += copy(buf[off:], token.GetOwnerID().Bytes())
tokenEndianness.PutUint64(buf[off:], token.ExpirationEpoch())
off += 8
}
// SetACLRules is an ACLRules field setter.
func (m *BearerTokenMsg_Info) SetACLRules(v []byte) {
m.ACLRules = v
}
// SetValidUntil is a ValidUntil field setter.
func (m *BearerTokenMsg_Info) SetValidUntil(v uint64) {
m.ValidUntil = v
}
// GetOwnerID is an OwnerID field getter.
func (m BearerTokenMsg_Info) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *BearerTokenMsg_Info) SetOwnerID(v OwnerID) {
m.OwnerID = v
}
// ExpirationEpoch returns the result of ValidUntil field getter.
func (m BearerTokenMsg_Info) ExpirationEpoch() uint64 {
return m.GetValidUntil()
}
// SetExpirationEpoch passes argument to ValidUntil field setter.
func (m *BearerTokenMsg_Info) SetExpirationEpoch(v uint64) {
m.SetValidUntil(v)
}
// SetOwnerKey is an OwnerKey field setter.
func (m *BearerTokenMsg) SetOwnerKey(v []byte) {
m.OwnerKey = v
}
// SetSignature is a Signature field setter.
func (m *BearerTokenMsg) SetSignature(v []byte) {
m.Signature = v
}
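For reference, a rough sketch of the sign/verify flow these wrappers supported; *BearerTokenMsg satisfies the BearerToken interface (the request-signing test further below relies on this), while the key generation and main scaffolding are illustrative assumptions:
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Fill the bearer token message.
	msg := new(service.BearerTokenMsg)
	msg.SetACLRules([]byte{1, 2, 3})
	msg.SetExpirationEpoch(100)

	// Sign the token information, then verify it against the public key.
	if err := service.AddSignatureWithKey(sk, service.NewSignedBearerToken(msg)); err != nil {
		panic(err)
	}
	if err := service.VerifySignatureWithKey(&sk.PublicKey, service.NewVerifiedBearerToken(msg)); err != nil {
		panic(err)
	}
}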

View file

@@ -1,199 +0,0 @@
package service
import (
"crypto/rand"
"testing"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
type testBearerToken struct {
aclRules []byte
expEpoch uint64
owner OwnerID
key []byte
sig []byte
}
func (s testBearerToken) GetACLRules() []byte {
return s.aclRules
}
func (s *testBearerToken) SetACLRules(v []byte) {
s.aclRules = v
}
func (s testBearerToken) ExpirationEpoch() uint64 {
return s.expEpoch
}
func (s *testBearerToken) SetExpirationEpoch(v uint64) {
s.expEpoch = v
}
func (s testBearerToken) GetOwnerID() OwnerID {
return s.owner
}
func (s *testBearerToken) SetOwnerID(v OwnerID) {
s.owner = v
}
func (s testBearerToken) GetOwnerKey() []byte {
return s.key
}
func (s *testBearerToken) SetOwnerKey(v []byte) {
s.key = v
}
func (s testBearerToken) GetSignature() []byte {
return s.sig
}
func (s *testBearerToken) SetSignature(v []byte) {
s.sig = v
}
func TestBearerTokenMsgGettersSetters(t *testing.T) {
var tok BearerToken = new(testBearerToken)
{ // ACLRules
rules := []byte{1, 2, 3}
tok.SetACLRules(rules)
require.Equal(t, rules, tok.GetACLRules())
}
{ // OwnerID
ownerID := OwnerID{}
_, err := rand.Read(ownerID[:])
require.NoError(t, err)
tok.SetOwnerID(ownerID)
require.Equal(t, ownerID, tok.GetOwnerID())
}
{ // ValidUntil
e := uint64(5)
tok.SetExpirationEpoch(e)
require.Equal(t, e, tok.ExpirationEpoch())
}
{ // OwnerKey
key := make([]byte, 10)
_, err := rand.Read(key)
require.NoError(t, err)
tok.SetOwnerKey(key)
require.Equal(t, key, tok.GetOwnerKey())
}
{ // Signature
sig := make([]byte, 10)
_, err := rand.Read(sig)
require.NoError(t, err)
tok.SetSignature(sig)
require.Equal(t, sig, tok.GetSignature())
}
}
func TestSignVerifyBearerToken(t *testing.T) {
var token BearerToken = new(testBearerToken)
// create private key for signing
sk := test.DecodeKey(0)
pk := &sk.PublicKey
rules := []byte{1, 2, 3}
token.SetACLRules(rules)
ownerID := OwnerID{}
_, err := rand.Read(ownerID[:])
require.NoError(t, err)
token.SetOwnerID(ownerID)
fEpoch := uint64(2)
token.SetExpirationEpoch(fEpoch)
signedToken := NewSignedBearerToken(token)
verifiedToken := NewVerifiedBearerToken(token)
// sign and verify token
require.NoError(t, AddSignatureWithKey(sk, signedToken))
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
items := []struct {
corrupt func()
restore func()
}{
{ // ACLRules
corrupt: func() {
token.SetACLRules(append(rules, 1))
},
restore: func() {
token.SetACLRules(rules)
},
},
{ // Owner ID
corrupt: func() {
ownerID[0]++
token.SetOwnerID(ownerID)
},
restore: func() {
ownerID[0]--
token.SetOwnerID(ownerID)
},
},
{ // Expiration epoch
corrupt: func() {
token.SetExpirationEpoch(fEpoch + 1)
},
restore: func() {
token.SetExpirationEpoch(fEpoch)
},
},
}
for _, v := range items {
v.corrupt()
require.Error(t, VerifySignatureWithKey(pk, verifiedToken))
v.restore()
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
}
}
func TestBearerTokenMsg_Setters(t *testing.T) {
s := new(BearerTokenMsg)
aclRules := []byte{1, 2, 3}
s.SetACLRules(aclRules)
require.Equal(t, aclRules, s.GetACLRules())
validUntil := uint64(6)
s.SetValidUntil(validUntil)
require.Equal(t, validUntil, s.GetValidUntil())
s.SetExpirationEpoch(validUntil + 1)
require.Equal(t, validUntil+1, s.ExpirationEpoch())
ownerID := OwnerID{1, 2, 3}
s.SetOwnerID(ownerID)
require.Equal(t, ownerID, s.GetOwnerID())
ownerKey := []byte{4, 5, 6}
s.SetOwnerKey(ownerKey)
require.Equal(t, ownerKey, s.GetOwnerKey())
sig := []byte{7, 8, 9}
s.SetSignature(sig)
require.Equal(t, sig, s.GetSignature())
}

View file

@@ -1,11 +0,0 @@
package service
// SetEpoch is an Epoch field setter.
func (m *ResponseMetaHeader) SetEpoch(v uint64) {
m.Epoch = v
}
// SetEpoch is an Epoch field setter.
func (m *RequestMetaHeader) SetEpoch(v uint64) {
m.Epoch = v
}

View file

@@ -1,21 +0,0 @@
package service
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetSetEpoch(t *testing.T) {
v := uint64(5)
items := []EpochContainer{
new(ResponseMetaHeader),
new(RequestMetaHeader),
}
for _, item := range items {
item.SetEpoch(v)
require.Equal(t, v, item.GetEpoch())
}
}

View file

@@ -1,53 +0,0 @@
package service
import "github.com/nspcc-dev/neofs-api-go/internal"
// ErrNilToken is returned by functions that expect
// a non-nil token argument, but received nil.
const ErrNilToken = internal.Error("token is nil")
// ErrInvalidTTL means that the TTL value does not
// satisfy a specific criterion.
const ErrInvalidTTL = internal.Error("invalid TTL value")
// ErrInvalidPublicKeyBytes means that the public key could not be unmarshaled.
const ErrInvalidPublicKeyBytes = internal.Error("cannot load public key")
// ErrCannotFindOwner is raised when signatures are empty in GetOwner.
const ErrCannotFindOwner = internal.Error("cannot find owner public key")
// ErrWrongOwner is raised when the passed OwnerID
// is not equal to the present PublicKey.
const ErrWrongOwner = internal.Error("wrong owner")
// ErrNilSignedDataSource is returned by functions that expect a non-nil
// SignedDataSource, but received nil.
const ErrNilSignedDataSource = internal.Error("signed data source is nil")
// ErrNilSignatureKeySource is returned by functions that expect a non-nil
// SignatureKeySource, but received nil.
const ErrNilSignatureKeySource = internal.Error("empty key-signature source")
// ErrEmptyDataWithSignature is returned by functions that expect
// a non-nil DataWithSignature, but received nil.
const ErrEmptyDataWithSignature = internal.Error("empty data with signature")
// ErrNegativeLength is returned by functions that received
// negative length for slice allocation.
const ErrNegativeLength = internal.Error("negative slice length")
// ErrNilRequestSignedData is returned by functions that expect
// a non-nil RequestSignedData, but received nil.
const ErrNilRequestSignedData = internal.Error("request signed data is nil")
// ErrNilRequestVerifyData is returned by functions that expect
// a non-nil RequestVerifyData, but received nil.
const ErrNilRequestVerifyData = internal.Error("request verification data is nil")
// ErrNilSignedDataReader is returned by functions that expect
// a non-nil SignedDataReader, but received nil.
const ErrNilSignedDataReader = internal.Error("signed data reader is nil")
// ErrNilSignKeyPairAccumulator is returned by functions that expect
// a non-nil SignKeyPairAccumulator, but received nil.
const ErrNilSignKeyPairAccumulator = internal.Error("signature-key pair accumulator is nil")

View file

@@ -1,131 +0,0 @@
package service
import (
"io"
)
type extHdrWrapper struct {
msg *RequestExtendedHeader_KV
}
type extHdrSrcWrapper struct {
extHdrSrc ExtendedHeadersSource
}
// CutMeta returns current value and sets RequestMetaHeader to empty value.
func (m *RequestMetaHeader) CutMeta() RequestMetaHeader {
cp := *m
m.Reset()
return cp
}
// RestoreMeta sets current RequestMetaHeader to passed value.
func (m *RequestMetaHeader) RestoreMeta(v RequestMetaHeader) {
*m = v
}
// ExtendedHeadersSignedData wraps passed ExtendedHeadersSource and returns SignedDataSource.
func ExtendedHeadersSignedData(headers ExtendedHeadersSource) SignedDataSource {
return &extHdrSrcWrapper{
extHdrSrc: headers,
}
}
// SignedData returns extended headers in a binary representation.
func (s extHdrSrcWrapper) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the length of extended headers slice.
func (s extHdrSrcWrapper) SignedDataSize() (res int) {
if s.extHdrSrc != nil {
for _, h := range s.extHdrSrc.ExtendedHeaders() {
if h != nil {
res += len(h.Key()) + len(h.Value())
}
}
}
return
}
// ReadSignedData copies a binary representation of the extended headers to passed buffer.
//
// If buffer length is less than required, io.ErrUnexpectedEOF returns.
func (s extHdrSrcWrapper) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
if s.extHdrSrc != nil {
off := 0
for _, h := range s.extHdrSrc.ExtendedHeaders() {
if h == nil {
continue
}
off += copy(p[off:], []byte(h.Key()))
off += copy(p[off:], []byte(h.Value()))
}
}
return sz, nil
}
// SetK is a K field setter.
func (m *RequestExtendedHeader_KV) SetK(v string) {
m.K = v
}
// SetV is a V field setter.
func (m *RequestExtendedHeader_KV) SetV(v string) {
m.V = v
}
// SetHeaders is a Headers field setter.
func (m *RequestExtendedHeader) SetHeaders(v []RequestExtendedHeader_KV) {
m.Headers = v
}
func wrapExtendedHeaderKV(msg *RequestExtendedHeader_KV) extHdrWrapper {
return extHdrWrapper{
msg: msg,
}
}
// Key returns the result of K field getter.
//
// If message is nil, empty string returns.
func (m extHdrWrapper) Key() string {
if m.msg != nil {
return m.msg.GetK()
}
return ""
}
// Value returns the result of V field getter.
//
// If message is nil, empty string returns.
func (m extHdrWrapper) Value() string {
if m.msg != nil {
return m.msg.GetV()
}
return ""
}
// ExtendedHeaders composes ExtendedHeader list from the Headers field getter result.
func (m RequestExtendedHeader) ExtendedHeaders() []ExtendedHeader {
hs := m.GetHeaders()
res := make([]ExtendedHeader, 0, len(hs))
for i := range hs {
res = append(res, wrapExtendedHeaderKV(&hs[i]))
}
return res
}

File diff suppressed because it is too large

View file

@@ -1,49 +0,0 @@
syntax = "proto3";
package service;
option go_package = "github.com/nspcc-dev/neofs-api-go/service";
option csharp_namespace = "NeoFS.API.Service";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// RequestMetaHeader contains information about request meta headers
// (should be embedded into message)
message RequestMetaHeader {
// TTL must be larger than zero; it is decreased in every NeoFS Node
uint32 TTL = 1;
// Epoch can be left empty by the user, because the node sets epoch to the actual value
uint64 Epoch = 2;
// Version defines protocol version
// TODO: not used for now, should be implemented in future
uint32 Version = 3;
// Raw determines whether the request is raw or not
bool Raw = 4;
// ExtendedHeader carries extended headers of the request
RequestExtendedHeader ExtendedHeader = 5 [(gogoproto.embed) = true, (gogoproto.nullable) = false];
}
// ResponseMetaHeader contains meta information based on request processing by server
// (should be embedded into message)
message ResponseMetaHeader {
// Current NeoFS epoch on server
uint64 Epoch = 1;
// Version defines protocol version
// TODO: not used for now, should be implemented in future
uint32 Version = 2;
}
// RequestExtendedHeader contains extended headers of request
message RequestExtendedHeader {
// KV contains string key-value pair
message KV {
// K carries extended header key
string K = 1;
// V carries extended header value
string V = 2;
}
// Headers carries list of key-value headers
repeated KV Headers = 1 [(gogoproto.nullable) = false];
}

View file

@@ -1,99 +0,0 @@
package service
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCutRestoreMeta(t *testing.T) {
items := []func() SeizedMetaHeaderContainer{
func() SeizedMetaHeaderContainer {
m := new(RequestMetaHeader)
m.SetEpoch(1)
return m
},
}
for _, item := range items {
v1 := item()
m1 := v1.CutMeta()
v1.RestoreMeta(m1)
require.Equal(t, item(), v1)
}
}
func TestRequestExtendedHeader_KV_Setters(t *testing.T) {
s := new(RequestExtendedHeader_KV)
key := "key"
s.SetK(key)
require.Equal(t, key, s.GetK())
val := "val"
s.SetV(val)
require.Equal(t, val, s.GetV())
}
func TestRequestExtendedHeader_SetHeaders(t *testing.T) {
s := new(RequestExtendedHeader)
hdr := RequestExtendedHeader_KV{}
hdr.SetK("key")
hdr.SetV("val")
hdrs := []RequestExtendedHeader_KV{
hdr,
}
s.SetHeaders(hdrs)
require.Equal(t, hdrs, s.GetHeaders())
}
func TestExtHdrWrapper(t *testing.T) {
s := wrapExtendedHeaderKV(nil)
require.Empty(t, s.Key())
require.Empty(t, s.Value())
msg := new(RequestExtendedHeader_KV)
s = wrapExtendedHeaderKV(msg)
key := "key"
msg.SetK(key)
require.Equal(t, key, s.Key())
val := "val"
msg.SetV(val)
require.Equal(t, val, s.Value())
}
func TestRequestExtendedHeader_ExtendedHeaders(t *testing.T) {
var (
k1, v1 = "key1", "value1"
k2, v2 = "key2", "value2"
h1 = new(RequestExtendedHeader_KV)
h2 = new(RequestExtendedHeader_KV)
)
h1.SetK(k1)
h1.SetV(v1)
h2.SetK(k2)
h2.SetV(v2)
s := new(RequestExtendedHeader)
s.SetHeaders([]RequestExtendedHeader_KV{
*h1, *h2,
})
xHdrs := s.ExtendedHeaders()
require.Len(t, xHdrs, 2)
require.Equal(t, k1, xHdrs[0].Key())
require.Equal(t, v1, xHdrs[0].Value())
require.Equal(t, k2, xHdrs[1].Key())
require.Equal(t, v2, xHdrs[1].Value())
}

View file

@@ -1,6 +0,0 @@
package service
// SetRaw is a Raw field setter.
func (m *RequestMetaHeader) SetRaw(raw bool) {
m.Raw = raw
}

View file

@@ -1,24 +0,0 @@
package service
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetSetRaw(t *testing.T) {
items := []RawContainer{
new(RequestMetaHeader),
}
for _, item := range items {
// init with false
item.SetRaw(false)
item.SetRaw(true)
require.True(t, item.GetRaw())
item.SetRaw(false)
require.False(t, item.GetRaw())
}
}

View file

@@ -1,37 +0,0 @@
package service
import "encoding/binary"
const (
_ NodeRole = iota
// InnerRingNode is a node that acts as an Inner Ring (IR) node.
InnerRingNode
// StorageNode is a node that acts as a storage node.
StorageNode
)
// String is a method that represents NodeRole as a string.
func (nt NodeRole) String() string {
switch nt {
case InnerRingNode:
return "InnerRingNode"
case StorageNode:
return "StorageNode"
default:
return "Unknown"
}
}
// Size returns the size necessary for a binary representation of the NodeRole.
func (nt NodeRole) Size() int {
return 4
}
// Bytes returns a binary representation of the NodeRole.
func (nt NodeRole) Bytes() []byte {
data := make([]byte, nt.Size())
binary.BigEndian.PutUint32(data, uint32(nt))
return data
}
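A tiny illustration of the NodeRole helpers above; the output comments are assumptions derived from the iota ordering:
package main

import (
	"fmt"

	"github.com/nspcc-dev/neofs-api-go/service"
)

func main() {
	role := service.StorageNode
	fmt.Println(role.String())        // StorageNode
	fmt.Printf("% x\n", role.Bytes()) // 00 00 00 02 (big-endian uint32)
}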

View file

@@ -1,23 +0,0 @@
package service
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNodeRole_String(t *testing.T) {
tests := []struct {
nt NodeRole
want string
}{
{want: "Unknown"},
{nt: StorageNode, want: "StorageNode"},
{nt: InnerRingNode, want: "InnerRingNode"},
}
for _, tt := range tests {
t.Run(tt.want, func(t *testing.T) {
require.Equal(t, tt.want, tt.nt.String())
})
}
}

View file

@@ -1,404 +0,0 @@
package service
import (
"crypto/ecdsa"
"io"
"sync"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/pkg/errors"
)
type keySign struct {
key *ecdsa.PublicKey
sign []byte
}
type signSourceGroup struct {
SignKeyPairSource
SignKeyPairAccumulator
sources []SignedDataSource
}
type signReadersGroup struct {
SignKeyPairSource
SignKeyPairAccumulator
readers []SignedDataReader
}
var bytesPool = sync.Pool{
New: func() interface{} {
return make([]byte, 5<<20)
},
}
// GetSignature is a sign field getter.
func (s keySign) GetSignature() []byte {
return s.sign
}
// GetPublicKey is a key field getter.
func (s keySign) GetPublicKey() *ecdsa.PublicKey {
return s.key
}
// Unites passed key with signature and returns SignKeyPair interface.
func newSignatureKeyPair(key *ecdsa.PublicKey, sign []byte) SignKeyPair {
return &keySign{
key: key,
sign: sign,
}
}
// Returns data from the passed SignedDataSource for signature creation/verification.
//
// If the passed SignedDataSource provides a SignedDataReader interface, data for signature is obtained
// using this interface for optimization. In this case, it is understood that reading into the slice D
// that dataForSignature returns does not change D.
//
// If the returned data length is negative, ErrNegativeLength returns.
func dataForSignature(src SignedDataSource) ([]byte, error) {
if src == nil {
return nil, ErrNilSignedDataSource
}
r, ok := src.(SignedDataReader)
if !ok {
return src.SignedData()
}
buf := bytesPool.Get().([]byte)
if size := r.SignedDataSize(); size < 0 {
return nil, ErrNegativeLength
} else if size <= cap(buf) {
buf = buf[:size]
} else {
buf = make([]byte, size)
}
n, err := r.ReadSignedData(buf)
if err != nil {
return nil, err
}
return buf[:n], nil
}
// DataSignature returns the signature of data obtained using the private key.
//
// If passed data container is nil, ErrNilSignedDataSource returns.
// If passed private key is nil, crypto.ErrEmptyPrivateKey returns.
// If the data container or the signature function returns an error, it is returned directly.
func DataSignature(key *ecdsa.PrivateKey, src SignedDataSource) ([]byte, error) {
if key == nil {
return nil, crypto.ErrEmptyPrivateKey
}
data, err := dataForSignature(src)
if err != nil {
return nil, err
}
defer bytesPool.Put(data)
return crypto.Sign(key, data)
}
// AddSignatureWithKey calculates the data signature and adds it to accumulator with public key.
//
// Any change of data provoke signature breakdown.
//
// Returns signing errors only.
func AddSignatureWithKey(key *ecdsa.PrivateKey, v DataWithSignKeyAccumulator) error {
sign, err := DataSignature(key, v)
if err != nil {
return err
}
v.AddSignKey(sign, &key.PublicKey)
return nil
}
// Checks passed key-signature pairs for data from the passed container.
//
// If the passed key-signature pair set is empty, nil returns immediately.
func verifySignatures(src SignedDataSource, items ...SignKeyPair) error {
if len(items) <= 0 {
return nil
}
data, err := dataForSignature(src)
if err != nil {
return err
}
defer bytesPool.Put(data)
for i := range items {
if i > 0 {
// add previous key bytes to the signed message
signKeyDataSrc := SignKeyPairsSignedData(items[i-1])
signKeyData, err := signKeyDataSrc.SignedData()
if err != nil {
return errors.Wrapf(err, "could not get signed data of key-signature #%d", i)
}
data = append(data, signKeyData...)
}
if err := crypto.Verify(
items[i].GetPublicKey(),
data,
items[i].GetSignature(),
); err != nil {
return err
}
}
return nil
}
// VerifySignatures checks passed key-signature pairs for data from the passed container.
//
// If passed data source is nil, ErrNilSignedDataSource returns.
// If check data is not ready, corresponding error returns.
// If at least one of the pairs is invalid, an error returns.
func VerifySignatures(src SignedDataSource, items ...SignKeyPair) error {
return verifySignatures(src, items...)
}
// VerifyAccumulatedSignatures checks if accumulated key-signature pairs are valid.
//
// Behaves like VerifySignatures.
// If passed key-signature source is empty, ErrNilSignatureKeySource returns.
func VerifyAccumulatedSignatures(src DataWithSignKeySource) error {
if src == nil {
return ErrNilSignatureKeySource
}
return verifySignatures(src, src.GetSignKeyPairs()...)
}
// VerifySignatureWithKey checks data signature from the passed container with passed key.
//
// If passed data with signature is nil, ErrEmptyDataWithSignature returns.
// If passed key is nil, crypto.ErrEmptyPublicKey returns.
// A non-nil error returns if and only if the signature does not pass verification.
func VerifySignatureWithKey(key *ecdsa.PublicKey, src DataWithSignature) error {
if src == nil {
return ErrEmptyDataWithSignature
} else if key == nil {
return crypto.ErrEmptyPublicKey
}
return verifySignatures(
src,
newSignatureKeyPair(
key,
src.GetSignature(),
),
)
}
// SignRequestData calculates request data signature and adds it to accumulator.
//
// Any change of request data provoke signature breakdown.
//
// If passed private key is nil, crypto.ErrEmptyPrivateKey returns.
// If passed RequestSignedData is nil, ErrNilRequestSignedData returns.
func SignRequestData(key *ecdsa.PrivateKey, src RequestSignedData) error {
if src == nil {
return ErrNilRequestSignedData
}
sigSrc, err := GroupSignedPayloads(
src,
src,
NewSignedSessionToken(
src.GetSessionToken(),
),
NewSignedBearerToken(
src.GetBearerToken(),
),
ExtendedHeadersSignedData(src),
SignKeyPairsSignedData(src.GetSignKeyPairs()...),
)
if err != nil {
return err
}
return AddSignatureWithKey(key, sigSrc)
}
// VerifyRequestData checks if accumulated key-signature pairs of data with token are valid.
//
// If passed RequestVerifyData is nil, ErrNilRequestVerifyData returns.
func VerifyRequestData(src RequestVerifyData) error {
if src == nil {
return ErrNilRequestVerifyData
}
verSrc, err := GroupVerifyPayloads(
src,
src,
NewVerifiedSessionToken(
src.GetSessionToken(),
),
NewVerifiedBearerToken(
src.GetBearerToken(),
),
ExtendedHeadersSignedData(src),
)
if err != nil {
return err
}
return VerifyAccumulatedSignatures(verSrc)
}
// SignedData returns payload bytes concatenation from all sources keeping order.
func (s signSourceGroup) SignedData() ([]byte, error) {
chunks := make([][]byte, 0, len(s.sources))
sz := 0
for i := range s.sources {
data, err := s.sources[i].SignedData()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed payload of element #%d", i)
}
chunks = append(chunks, data)
sz += len(data)
}
res := make([]byte, sz)
off := 0
for i := range chunks {
off += copy(res[off:], chunks[i])
}
return res, nil
}
// SignedData returns payload bytes concatenation from all readers.
func (s signReadersGroup) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the sum of sizes of all readers.
func (s signReadersGroup) SignedDataSize() (sz int) {
for i := range s.readers {
sz += s.readers[i].SignedDataSize()
}
return
}
// ReadSignedData reads data from all readers to passed buffer keeping order.
//
// If the buffer size is insufficient, io.ErrUnexpectedEOF returns.
func (s signReadersGroup) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
off := 0
for i := range s.readers {
n, err := s.readers[i].ReadSignedData(p[off:])
off += n
if err != nil {
return off, errors.Wrapf(err, "could not read signed payload of element #%d", i)
}
}
return off, nil
}
// GroupSignedPayloads groups SignKeyPairAccumulator and SignedDataSource list to DataWithSignKeyAccumulator.
//
// If passed SignKeyPairAccumulator is nil, ErrNilSignKeyPairAccumulator returns.
//
// Signed payload of the result is a concatenation of payloads of list elements keeping order.
// Nil elements in list are ignored.
//
// If all elements implement SignedDataReader, result implements it too.
func GroupSignedPayloads(acc SignKeyPairAccumulator, sources ...SignedDataSource) (DataWithSignKeyAccumulator, error) {
if acc == nil {
return nil, ErrNilSignKeyPairAccumulator
}
return groupPayloads(acc, nil, sources...), nil
}
// GroupVerifyPayloads groups SignKeyPairSource and SignedDataSource list to DataWithSignKeySource.
//
// If passed SignKeyPairSource is nil, ErrNilSignatureKeySource returns.
//
// Signed payload of the result is a concatenation of payloads of list elements keeping order.
// Nil elements in list are ignored.
//
// If all elements implement SignedDataReader, result implements it too.
func GroupVerifyPayloads(src SignKeyPairSource, sources ...SignedDataSource) (DataWithSignKeySource, error) {
if src == nil {
return nil, ErrNilSignatureKeySource
}
return groupPayloads(nil, src, sources...), nil
}
func groupPayloads(acc SignKeyPairAccumulator, src SignKeyPairSource, sources ...SignedDataSource) interface {
SignedDataSource
SignKeyPairSource
SignKeyPairAccumulator
} {
var allReaders bool
for i := range sources {
if sources[i] == nil {
continue
} else if _, allReaders = sources[i].(SignedDataReader); !allReaders {
break
}
}
if !allReaders {
res := &signSourceGroup{
SignKeyPairSource: src,
SignKeyPairAccumulator: acc,
sources: make([]SignedDataSource, 0, len(sources)),
}
for i := range sources {
if sources[i] != nil {
res.sources = append(res.sources, sources[i])
}
}
return res
}
res := &signReadersGroup{
SignKeyPairSource: src,
SignKeyPairAccumulator: acc,
readers: make([]SignedDataReader, 0, len(sources)),
}
for i := range sources {
if sources[i] != nil {
res.readers = append(res.readers, sources[i].(SignedDataReader))
}
}
return res
}
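To show the core contract, a hedged sketch of signing an arbitrary payload through a custom SignedDataSource and checking the result with neofs-crypto directly; it assumes SignedDataSource only requires the SignedData method, and the payload type is purely illustrative:
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	"github.com/nspcc-dev/neofs-api-go/service"
	crypto "github.com/nspcc-dev/neofs-crypto"
)

// payload is a minimal SignedDataSource: it just exposes raw bytes to sign.
type payload struct{ data []byte }

func (p payload) SignedData() ([]byte, error) { return p.data, nil }

func main() {
	sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	p := payload{data: []byte("hello")}

	// Sign the payload bytes with the private key...
	sig, err := service.DataSignature(sk, p)
	if err != nil {
		panic(err)
	}

	// ...and verify the signature over the same bytes.
	if err := crypto.Verify(&sk.PublicKey, p.data, sig); err != nil {
		panic(err)
	}
}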

View file

@@ -1,400 +0,0 @@
package service
import (
"crypto/ecdsa"
"crypto/rand"
"errors"
"io"
"testing"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
type testSignedDataSrc struct {
err error
data []byte
token SessionToken
bearer BearerToken
extHdrs []ExtendedHeader
signKeys []SignKeyPair
}
type testSignedDataReader struct {
*testSignedDataSrc
}
func (s testSignedDataSrc) GetSignature() []byte {
if len(s.signKeys) > 0 {
return s.signKeys[0].GetSignature()
}
return nil
}
func (s testSignedDataSrc) GetSignKeyPairs() []SignKeyPair {
return s.signKeys
}
func (s testSignedDataSrc) SignedData() ([]byte, error) {
return s.data, s.err
}
func (s *testSignedDataSrc) AddSignKey(sig []byte, key *ecdsa.PublicKey) {
s.signKeys = append(s.signKeys,
newSignatureKeyPair(key, sig),
)
}
func testData(t *testing.T, sz int) []byte {
d := make([]byte, sz)
_, err := rand.Read(d)
require.NoError(t, err)
return d
}
func (s testSignedDataSrc) GetSessionToken() SessionToken {
return s.token
}
func (s testSignedDataSrc) GetBearerToken() BearerToken {
return s.bearer
}
func (s testSignedDataSrc) ExtendedHeaders() []ExtendedHeader {
return s.extHdrs
}
func (s testSignedDataReader) SignedDataSize() int {
return len(s.data)
}
func (s testSignedDataReader) ReadSignedData(buf []byte) (int, error) {
if s.err != nil {
return 0, s.err
}
var err error
if len(buf) < len(s.data) {
err = io.ErrUnexpectedEOF
}
return copy(buf, s.data), err
}
func TestDataSignature(t *testing.T) {
var err error
// nil private key
_, err = DataSignature(nil, nil)
require.EqualError(t, err, crypto.ErrEmptyPrivateKey.Error())
// create test private key
sk := test.DecodeKey(0)
// nil signed data source
_, err = DataSignature(sk, nil)
require.EqualError(t, err, ErrNilSignedDataSource.Error())
t.Run("common signed data source", func(t *testing.T) {
// create test data source
src := &testSignedDataSrc{
data: testData(t, 10),
}
// create custom error for data source
src.err = errors.New("test error for data source")
_, err = DataSignature(sk, src)
require.EqualError(t, err, src.err.Error())
// reset error to nil
src.err = nil
// calculate data signature
sig, err := DataSignature(sk, src)
require.NoError(t, err)
// ascertain that the signature passes verification
require.NoError(t, crypto.Verify(&sk.PublicKey, src.data, sig))
})
t.Run("signed data reader", func(t *testing.T) {
// create test signed data reader
src := &testSignedDataSrc{
data: testData(t, 10),
}
// create custom error for signed data reader
src.err = errors.New("test error for signed data reader")
sig, err := DataSignature(sk, src)
require.EqualError(t, err, src.err.Error())
// reset error to nil
src.err = nil
// calculate data signature
sig, err = DataSignature(sk, src)
require.NoError(t, err)
// ascertain that the signature passes verification
require.NoError(t, crypto.Verify(&sk.PublicKey, src.data, sig))
})
}
func TestAddSignatureWithKey(t *testing.T) {
require.NoError(t,
AddSignatureWithKey(
test.DecodeKey(0),
&testSignedDataSrc{
data: testData(t, 10),
},
),
)
}
func TestVerifySignatures(t *testing.T) {
// empty signatures
require.NoError(t, VerifySignatures(nil))
// create test signature source
src := &testSignedDataSrc{
data: testData(t, 10),
}
// create private key for test
sk := test.DecodeKey(0)
// calculate a signature of the data
sig, err := crypto.Sign(sk, src.data)
require.NoError(t, err)
// ascertain that verification is passed
require.NoError(t,
VerifySignatures(
src,
newSignatureKeyPair(&sk.PublicKey, sig),
),
)
// break the signature
sig[0]++
require.Error(t,
VerifySignatures(
src,
newSignatureKeyPair(&sk.PublicKey, sig),
),
)
// restore the signature
sig[0]--
// empty data source
require.EqualError(t,
VerifySignatures(nil, nil),
ErrNilSignedDataSource.Error(),
)
}
func TestVerifyAccumulatedSignatures(t *testing.T) {
// nil signature source
require.EqualError(t,
VerifyAccumulatedSignatures(nil),
ErrNilSignatureKeySource.Error(),
)
// create test private key
sk := test.DecodeKey(0)
signKey := new(RequestVerificationHeader_Signature)
signKey.Peer = crypto.MarshalPublicKey(&sk.PublicKey)
// create signature source
src := &testSignedDataSrc{
data: testData(t, 10),
signKeys: []SignKeyPair{signKey},
}
var err error
// calculate a signature
signKey.Sign, err = crypto.Sign(sk, src.data)
require.NoError(t, err)
// ascertain that verification is passed
require.NoError(t, VerifyAccumulatedSignatures(src))
// break the signature
signKey.Sign[0]++
// ascertain that verification is failed
require.Error(t, VerifyAccumulatedSignatures(src))
}
func TestVerifySignatureWithKey(t *testing.T) {
// nil data with signature
require.EqualError(t,
VerifySignatureWithKey(nil, nil),
ErrEmptyDataWithSignature.Error(),
)
signKey := new(RequestVerificationHeader_Signature)
// create test signature source
src := &testSignedDataSrc{
data: testData(t, 10),
signKeys: []SignKeyPair{signKey},
}
// nil public key
require.EqualError(t,
VerifySignatureWithKey(nil, src),
crypto.ErrEmptyPublicKey.Error(),
)
// create test private key
sk := test.DecodeKey(0)
var err error
// calculate a signature
signKey.Sign, err = crypto.Sign(sk, src.data)
require.NoError(t, err)
// ascertain that verification is passed
require.NoError(t, VerifySignatureWithKey(&sk.PublicKey, src))
// break the signature
signKey.Sign[0]++
// ascertain that verification is failed
require.Error(t, VerifySignatureWithKey(&sk.PublicKey, src))
}
func TestSignVerifyRequestData(t *testing.T) {
// sign with empty RequestSignedData
require.EqualError(t,
SignRequestData(nil, nil),
ErrNilRequestSignedData.Error(),
)
// verify with empty RequestVerifyData
require.EqualError(t,
VerifyRequestData(nil),
ErrNilRequestVerifyData.Error(),
)
// create test session token
var (
token = new(Token)
initVerb = Token_Info_Verb(1)
bearer = new(BearerTokenMsg)
bearerEpoch = uint64(8)
extHdrKey = "key"
extHdr = new(RequestExtendedHeader_KV)
)
token.SetVerb(initVerb)
bearer.SetExpirationEpoch(bearerEpoch)
extHdr.SetK(extHdrKey)
// create test data with token
src := &testSignedDataSrc{
data: testData(t, 10),
token: token,
bearer: bearer,
extHdrs: []ExtendedHeader{
wrapExtendedHeaderKV(extHdr),
},
}
// create test private key
sk := test.DecodeKey(0)
// sign with private key
require.NoError(t, SignRequestData(sk, src))
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// break the data
src.data[0]++
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the data
src.data[0]--
// break the token
token.SetVerb(initVerb + 1)
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the token
token.SetVerb(initVerb)
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// break the Bearer token
bearer.SetExpirationEpoch(bearerEpoch + 1)
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the Bearer token
bearer.SetExpirationEpoch(bearerEpoch)
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// break the extended header
extHdr.SetK(extHdrKey + "1")
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
// restore the extended header
extHdr.SetK(extHdrKey)
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(src))
// wrap to data reader
rdr := &testSignedDataReader{
testSignedDataSrc: src,
}
// sign with private key
require.NoError(t, SignRequestData(sk, rdr))
// ascertain that verification is passed
require.NoError(t, VerifyRequestData(rdr))
if len(rdr.GetSignKeyPairs()) < 2 {
// add one more signature
require.NoError(t, SignRequestData(test.DecodeKey(1), rdr))
}
// change key-signature order
rdr.signKeys[0], rdr.signKeys[1] = rdr.signKeys[1], rdr.signKeys[0]
// ascertain that verification is failed
require.Error(t, VerifyRequestData(src))
}

View file

@@ -1,265 +0,0 @@
package service
import (
"crypto/ecdsa"
"encoding/binary"
"io"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
)
type signAccumWithToken struct {
SignedDataSource
SignKeyPairAccumulator
SignKeyPairSource
token SessionToken
}
type signDataReaderWithToken struct {
SignedDataSource
SignKeyPairAccumulator
SignKeyPairSource
rdr SignedDataReader
token SessionToken
}
type signedSessionToken struct {
SessionToken
}
const verbSize = 4
const fixedTokenDataSize = 0 +
refs.UUIDSize +
refs.OwnerIDSize +
verbSize +
refs.UUIDSize +
refs.CIDSize +
8 +
8
var tokenEndianness = binary.BigEndian
// GetID is an ID field getter.
func (m Token_Info) GetID() TokenID {
return m.ID
}
// SetID is an ID field setter.
func (m *Token_Info) SetID(id TokenID) {
m.ID = id
}
// GetOwnerID is an OwnerID field getter.
func (m Token_Info) GetOwnerID() OwnerID {
return m.OwnerID
}
// SetOwnerID is an OwnerID field setter.
func (m *Token_Info) SetOwnerID(id OwnerID) {
m.OwnerID = id
}
// SetVerb is a Verb field setter.
func (m *Token_Info) SetVerb(verb Token_Info_Verb) {
m.Verb = verb
}
// GetAddress is an Address field getter.
func (m Token_Info) GetAddress() Address {
return m.Address
}
// SetAddress is an Address field setter.
func (m *Token_Info) SetAddress(addr Address) {
m.Address = addr
}
// CreationEpoch is a Created field getter.
func (m TokenLifetime) CreationEpoch() uint64 {
return m.Created
}
// SetCreationEpoch is a Created field setter.
func (m *TokenLifetime) SetCreationEpoch(e uint64) {
m.Created = e
}
// ExpirationEpoch is a ValidUntil field getter.
func (m TokenLifetime) ExpirationEpoch() uint64 {
return m.ValidUntil
}
// SetExpirationEpoch is a ValidUntil field setter.
func (m *TokenLifetime) SetExpirationEpoch(e uint64) {
m.ValidUntil = e
}
// SetSessionKey is a SessionKey field setter.
func (m *Token_Info) SetSessionKey(key []byte) {
m.SessionKey = key
}
// SetOwnerKey is an OwnerKey field setter.
func (m *Token_Info) SetOwnerKey(key []byte) {
m.OwnerKey = key
}
// SetSignature is a Signature field setter.
func (m *Token) SetSignature(sig []byte) {
m.Signature = sig
}
// Size returns the size of a binary representation of the verb.
func (x Token_Info_Verb) Size() int {
return verbSize
}
// Bytes returns a binary representation of the verb.
func (x Token_Info_Verb) Bytes() []byte {
data := make([]byte, verbSize)
tokenEndianness.PutUint32(data, uint32(x))
return data
}
// AddSignKey calls a Signature field setter and an OwnerKey field setter with corresponding arguments.
func (s signedSessionToken) AddSignKey(sig []byte, key *ecdsa.PublicKey) {
if s.SessionToken != nil {
s.SessionToken.SetSignature(sig)
s.SessionToken.SetOwnerKey(
crypto.MarshalPublicKey(key),
)
}
}
// SignedData returns token information in a binary representation.
func (s signedSessionToken) SignedData() ([]byte, error) {
return SignedDataFromReader(s)
}
// SignedDataSize returns the length of signed token information slice.
func (s signedSessionToken) SignedDataSize() int {
return tokenInfoSize(s.SessionToken)
}
// ReadSignedData copies a binary representation of the token information to passed buffer.
//
// If buffer length is less than required, io.ErrUnexpectedEOF returns.
func (s signedSessionToken) ReadSignedData(p []byte) (int, error) {
sz := s.SignedDataSize()
if len(p) < sz {
return 0, io.ErrUnexpectedEOF
}
copyTokenSignedData(p, s.SessionToken)
return sz, nil
}
// NewSignedSessionToken wraps passed SessionToken in a component suitable for signing.
//
// Result can be used in AddSignatureWithKey function.
func NewSignedSessionToken(token SessionToken) DataWithSignKeyAccumulator {
return &signedSessionToken{
SessionToken: token,
}
}
// NewVerifiedSessionToken wraps passed SessionToken in a component suitable for signature verification.
//
// Result can be used in VerifySignatureWithKey function.
func NewVerifiedSessionToken(token SessionToken) DataWithSignature {
return &signedSessionToken{
SessionToken: token,
}
}
func tokenInfoSize(v SessionKeySource) int {
if v == nil {
return 0
}
return fixedTokenDataSize + len(v.GetSessionKey())
}
// Fills passed buffer with signing token information bytes.
// Does not check buffer length, it is understood that enough space is allocated in it.
//
// If passed SessionTokenInfo is nil, buffer remains unchanged.
func copyTokenSignedData(buf []byte, token SessionTokenInfo) {
if token == nil {
return
}
var off int
off += copy(buf[off:], token.GetID().Bytes())
off += copy(buf[off:], token.GetOwnerID().Bytes())
off += copy(buf[off:], token.GetVerb().Bytes())
addr := token.GetAddress()
off += copy(buf[off:], addr.CID.Bytes())
off += copy(buf[off:], addr.ObjectID.Bytes())
tokenEndianness.PutUint64(buf[off:], token.CreationEpoch())
off += 8
tokenEndianness.PutUint64(buf[off:], token.ExpirationEpoch())
off += 8
copy(buf[off:], token.GetSessionKey())
}
// SignedData concatenates signed data with session token information. Returns concatenation result.
//
// Token bytes are added if and only if token is not nil.
func (s signAccumWithToken) SignedData() ([]byte, error) {
data, err := s.SignedDataSource.SignedData()
if err != nil {
return nil, err
}
tokenData := make([]byte, tokenInfoSize(s.token))
copyTokenSignedData(tokenData, s.token)
return append(data, tokenData...), nil
}
func (s signDataReaderWithToken) SignedDataSize() int {
sz := s.rdr.SignedDataSize()
if sz < 0 {
return -1
}
sz += tokenInfoSize(s.token)
return sz
}
func (s signDataReaderWithToken) ReadSignedData(p []byte) (int, error) {
dataSize := s.rdr.SignedDataSize()
if dataSize < 0 {
return 0, ErrNegativeLength
}
sumSize := dataSize + tokenInfoSize(s.token)
if len(p) < sumSize {
return 0, io.ErrUnexpectedEOF
}
if n, err := s.rdr.ReadSignedData(p); err != nil {
return n, err
}
copyTokenSignedData(p[dataSize:], s.token)
return sumSize, nil
}

View file

@ -1,248 +0,0 @@
package service
import (
"crypto/rand"
"testing"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
func TestTokenGettersSetters(t *testing.T) {
var tok SessionToken = new(Token)
{ // ID
id, err := refs.NewUUID()
require.NoError(t, err)
tok.SetID(id)
require.Equal(t, id, tok.GetID())
}
{ // OwnerID
ownerID := OwnerID{}
_, err := rand.Read(ownerID[:])
require.NoError(t, err)
tok.SetOwnerID(ownerID)
require.Equal(t, ownerID, tok.GetOwnerID())
}
{ // Verb
verb := Token_Info_Verb(3)
tok.SetVerb(verb)
require.Equal(t, verb, tok.GetVerb())
}
{ // Address
addr := Address{}
_, err := rand.Read(addr.CID[:])
require.NoError(t, err)
_, err = rand.Read(addr.ObjectID[:])
require.NoError(t, err)
tok.SetAddress(addr)
require.Equal(t, addr, tok.GetAddress())
}
{ // Created
e := uint64(5)
tok.SetCreationEpoch(e)
require.Equal(t, e, tok.CreationEpoch())
}
{ // ValidUntil
e := uint64(5)
tok.SetExpirationEpoch(e)
require.Equal(t, e, tok.ExpirationEpoch())
}
{ // SessionKey
key := make([]byte, 10)
_, err := rand.Read(key)
require.NoError(t, err)
tok.SetSessionKey(key)
require.Equal(t, key, tok.GetSessionKey())
}
{ // Signature
sig := make([]byte, 10)
_, err := rand.Read(sig)
require.NoError(t, err)
tok.SetSignature(sig)
require.Equal(t, sig, tok.GetSignature())
}
}
func TestSignToken(t *testing.T) {
var token SessionToken = new(Token)
// create private key for signing
sk := test.DecodeKey(0)
pk := &sk.PublicKey
id := TokenID{}
_, err := rand.Read(id[:])
require.NoError(t, err)
token.SetID(id)
ownerID := OwnerID{}
_, err = rand.Read(ownerID[:])
require.NoError(t, err)
token.SetOwnerID(ownerID)
verb := Token_Info_Verb(1)
token.SetVerb(verb)
addr := Address{}
_, err = rand.Read(addr.ObjectID[:])
require.NoError(t, err)
_, err = rand.Read(addr.CID[:])
require.NoError(t, err)
token.SetAddress(addr)
cEpoch := uint64(1)
token.SetCreationEpoch(cEpoch)
fEpoch := uint64(2)
token.SetExpirationEpoch(fEpoch)
sessionKey := make([]byte, 10)
_, err = rand.Read(sessionKey[:])
require.NoError(t, err)
token.SetSessionKey(sessionKey)
signedToken := NewSignedSessionToken(token)
verifiedToken := NewVerifiedSessionToken(token)
// sign and verify token
require.NoError(t, AddSignatureWithKey(sk, signedToken))
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
items := []struct {
corrupt func()
restore func()
}{
{ // ID
corrupt: func() {
id[0]++
token.SetID(id)
},
restore: func() {
id[0]--
token.SetID(id)
},
},
{ // Owner ID
corrupt: func() {
ownerID[0]++
token.SetOwnerID(ownerID)
},
restore: func() {
ownerID[0]--
token.SetOwnerID(ownerID)
},
},
{ // Verb
corrupt: func() {
token.SetVerb(verb + 1)
},
restore: func() {
token.SetVerb(verb)
},
},
{ // ObjectID
corrupt: func() {
addr.ObjectID[0]++
token.SetAddress(addr)
},
restore: func() {
addr.ObjectID[0]--
token.SetAddress(addr)
},
},
{ // CID
corrupt: func() {
addr.CID[0]++
token.SetAddress(addr)
},
restore: func() {
addr.CID[0]--
token.SetAddress(addr)
},
},
{ // Creation epoch
corrupt: func() {
token.SetCreationEpoch(cEpoch + 1)
},
restore: func() {
token.SetCreationEpoch(cEpoch)
},
},
{ // Expiration epoch
corrupt: func() {
token.SetExpirationEpoch(fEpoch + 1)
},
restore: func() {
token.SetExpirationEpoch(fEpoch)
},
},
{ // Session key
corrupt: func() {
sessionKey[0]++
token.SetSessionKey(sessionKey)
},
restore: func() {
sessionKey[0]--
token.SetSessionKey(sessionKey)
},
},
}
for _, v := range items {
v.corrupt()
require.Error(t, VerifySignatureWithKey(pk, verifiedToken))
v.restore()
require.NoError(t, VerifySignatureWithKey(pk, verifiedToken))
}
}
func TestSignedSessionToken_AddSignKey(t *testing.T) {
// nil SessionToken
s := new(signedSessionToken)
require.NotPanics(t, func() {
s.AddSignKey(nil, nil)
})
// create test public key and signature
pk := &test.DecodeKey(0).PublicKey
sig := []byte{1, 2, 3}
s.SessionToken = new(Token)
// add key-signature pair to SessionToken
s.AddSignKey(sig, pk)
require.Equal(t, sig, s.GetSignature())
require.Equal(t,
crypto.MarshalPublicKey(pk),
s.GetOwnerKey(),
)
}

Some files were not shown because too many files have changed in this diff.