diff --git a/api/netmap/types.go b/api/netmap/types.go
index 877357de..b5559724 100644
--- a/api/netmap/types.go
+++ b/api/netmap/types.go
@@ -1,6 +1,10 @@
 package netmap
 
 import (
+	"bytes"
+	"iter"
+	"slices"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
 )
@@ -382,6 +386,18 @@ func (a *Attribute) SetParents(parent []string) {
 	a.parents = parent
 }
 
+// Clone returns a copy of Attribute.
+func (a *Attribute) Clone() *Attribute {
+	if a == nil {
+		return nil
+	}
+	return &Attribute{
+		parents: slices.Clone(a.parents),
+		value:   a.value,
+		key:     a.key,
+	}
+}
+
 func (ni *NodeInfo) GetPublicKey() []byte {
 	if ni != nil {
 		return ni.publicKey
@@ -394,25 +410,6 @@ func (ni *NodeInfo) SetPublicKey(v []byte) {
 	ni.publicKey = v
 }
 
-// GetAddress returns node's network address.
-//
-// Deprecated: use IterateAddresses.
-func (ni *NodeInfo) GetAddress() (addr string) {
-	ni.IterateAddresses(func(s string) bool {
-		addr = s
-		return true
-	})
-
-	return
-}
-
-// SetAddress sets node's network address.
-//
-// Deprecated: use SetAddresses.
-func (ni *NodeInfo) SetAddress(v string) {
-	ni.SetAddresses(v)
-}
-
 // SetAddresses sets list of network addresses of the node.
 func (ni *NodeInfo) SetAddresses(v ...string) {
 	ni.addresses = v
@@ -427,10 +424,23 @@ func (ni *NodeInfo) NumberOfAddresses() int {
 	return 0
 }
 
+// Addresses returns an iterator over network addresses of the node.
+func (ni NodeInfo) Addresses() iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for i := range ni.addresses {
+			if !yield(ni.addresses[i]) {
+				break
+			}
+		}
+	}
+}
+
 // IterateAddresses iterates over network addresses of the node.
 // Breaks iteration on f's true return.
 //
 // Handler should not be nil.
+//
+// Deprecated: use [NodeInfo.Addresses] instead.
 func (ni *NodeInfo) IterateAddresses(f func(string) bool) {
 	if ni != nil {
 		for i := range ni.addresses {
@@ -465,6 +475,23 @@ func (ni *NodeInfo) SetState(state NodeState) {
 	ni.state = state
 }
 
+// Clone returns a copy of NodeInfo.
+func (ni *NodeInfo) Clone() *NodeInfo {
+	if ni == nil {
+		return nil
+	}
+	dst := NodeInfo{
+		addresses:  slices.Clone(ni.addresses),
+		publicKey:  bytes.Clone(ni.publicKey),
+		state:      ni.state,
+		attributes: make([]Attribute, len(ni.attributes)),
+	}
+	for i, v := range ni.attributes {
+		dst.attributes[i] = *v.Clone()
+	}
+	return &dst
+}
+
 func (l *LocalNodeInfoResponseBody) GetVersion() *refs.Version {
 	if l != nil {
 		return l.version
@@ -550,6 +577,8 @@ type NetworkConfig struct {
 }
 
 // NumberOfParameters returns number of network parameters.
+//
+// Deprecated: use [NetworkConfig.Parameters] instead.
 func (x *NetworkConfig) NumberOfParameters() int {
 	if x != nil {
 		return len(x.ps)
@@ -558,10 +587,20 @@ func (x *NetworkConfig) NumberOfParameters() int {
 	return 0
 }
 
+// Parameters returns all network parameters.
+func (x *NetworkConfig) Parameters() []NetworkParameter {
+	if x != nil {
+		return x.ps
+	}
+	return nil
+}
+
 // IterateParameters iterates over network parameters.
 // Breaks iteration on f's true return.
 //
 // Handler must not be nil.
+//
+// Deprecated: use [NetworkConfig.Parameters] instead.
 func (x *NetworkConfig) IterateParameters(f func(*NetworkParameter) bool) {
 	if x != nil {
 		for i := range x.ps {
diff --git a/api/netmap/types_test.go b/api/netmap/types_test.go
new file mode 100644
index 00000000..47d9d7b2
--- /dev/null
+++ b/api/netmap/types_test.go
@@ -0,0 +1,48 @@
+package netmap
+
+import (
+	"bytes"
+	"slices"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestNodeInfo_Clone(t *testing.T) {
+	var ni NodeInfo
+	ni.publicKey = []byte{2}
+	attr := Attribute{
+		key:     "key",
+		value:   "value",
+		parents: []string{"parent", "parent2"},
+	}
+	ni.attributes = []Attribute{attr}
+	ni.addresses = []string{"5", "6"}
+
+	c := ni.Clone()
+
+	require.True(t, c != &ni)
+	require.True(t, bytes.Equal(c.publicKey, ni.publicKey))
+	require.True(t, &(c.publicKey[0]) != &(ni.publicKey[0]))
+	require.True(t, &(c.attributes[0]) != &(ni.attributes[0]))
+	require.True(t, slices.Compare(c.addresses, ni.addresses) == 0)
+	require.True(t, &(c.addresses[0]) != &(ni.addresses[0]))
+}
+
+func TestAttribute_Clone(t *testing.T) {
+	attr := Attribute{
+		key:     "key",
+		value:   "value",
+		parents: []string{"parent1", "parent2"},
+	}
+
+	c := attr.Clone()
+
+	require.True(t, c != &attr)
+	require.True(t, c.key == attr.key)
+	require.True(t, &(c.key) != &(attr.key))
+	require.True(t, &(c.value) != &(attr.value))
+	require.True(t, c.value == attr.value)
+	require.True(t, &(c.parents[0]) != &(attr.parents[0]))
+	require.True(t, slices.Compare(c.parents, attr.parents) == 0)
+}
diff --git a/api/object/convert.go b/api/object/convert.go
index 016a367a..90e40dc3 100644
--- a/api/object/convert.go
+++ b/api/object/convert.go
@@ -2389,6 +2389,7 @@ func (r *PatchRequestBody) ToGRPCMessage() grpc.Message {
 		m.SetNewAttributes(AttributesToGRPC(r.newAttributes))
 		m.SetReplaceAttributes(r.replaceAttributes)
 		m.SetPatch(r.patch.ToGRPCMessage().(*object.PatchRequest_Body_Patch))
+		m.SetNewSplitHeader(r.newSplitHeader.ToGRPCMessage().(*object.Header_Split))
 	}
 
 	return m
@@ -2437,6 +2438,20 @@ func (r *PatchRequestBody) FromGRPCMessage(m grpc.Message) error {
 		}
 	}
 
+	newSplitHeader := v.GetNewSplitHeader()
+	if newSplitHeader == nil {
+		r.newSplitHeader = nil
+	} else {
+		if r.newSplitHeader == nil {
+			r.newSplitHeader = new(SplitHeader)
+		}
+
+		err = r.newSplitHeader.FromGRPCMessage(newSplitHeader)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
diff --git a/api/object/grpc/service.pb.go b/api/object/grpc/service.pb.go
index 96034670..8926f5cb 100644
--- a/api/object/grpc/service.pb.go
+++ b/api/object/grpc/service.pb.go
@@ -5181,6 +5181,9 @@ type PatchRequest_Body struct {
 	// merged. If the incoming `new_attributes` list contains already existing
 	// key, then it just replaces it while merging the lists.
 	ReplaceAttributes *bool `protobuf:"varint,3,opt,name=replace_attributes,json=replaceAttributes" json:"replace_attributes,omitempty"`
+	// New split header for the object. This defines how the object will relate
+	// to other objects in a split operation.
+	NewSplitHeader *Header_Split `protobuf:"bytes,5,opt,name=new_split_header,json=newSplitHeader" json:"new_split_header,omitempty"`
 	// The patch that is applied for the object.
Patch *PatchRequest_Body_Patch `protobuf:"bytes,4,opt,name=patch" json:"patch,omitempty"` unknownFields protoimpl.UnknownFields @@ -5233,6 +5236,13 @@ func (x *PatchRequest_Body) GetReplaceAttributes() bool { return false } +func (x *PatchRequest_Body) GetNewSplitHeader() *Header_Split { + if x != nil { + return x.NewSplitHeader + } + return nil +} + func (x *PatchRequest_Body) GetPatch() *PatchRequest_Body_Patch { if x != nil { return x.Patch @@ -5252,6 +5262,10 @@ func (x *PatchRequest_Body) SetReplaceAttributes(v bool) { x.ReplaceAttributes = &v } +func (x *PatchRequest_Body) SetNewSplitHeader(v *Header_Split) { + x.NewSplitHeader = v +} + func (x *PatchRequest_Body) SetPatch(v *PatchRequest_Body_Patch) { x.Patch = v } @@ -5270,6 +5284,13 @@ func (x *PatchRequest_Body) HasReplaceAttributes() bool { return x.ReplaceAttributes != nil } +func (x *PatchRequest_Body) HasNewSplitHeader() bool { + if x == nil { + return false + } + return x.NewSplitHeader != nil +} + func (x *PatchRequest_Body) HasPatch() bool { if x == nil { return false @@ -5285,6 +5306,10 @@ func (x *PatchRequest_Body) ClearReplaceAttributes() { x.ReplaceAttributes = nil } +func (x *PatchRequest_Body) ClearNewSplitHeader() { + x.NewSplitHeader = nil +} + func (x *PatchRequest_Body) ClearPatch() { x.Patch = nil } @@ -5305,6 +5330,9 @@ type PatchRequest_Body_builder struct { // merged. If the incoming `new_attributes` list contains already existing // key, then it just replaces it while merging the lists. ReplaceAttributes *bool + // New split header for the object. This defines how the object will relate + // to other objects in a split operation. + NewSplitHeader *Header_Split // The patch that is applied for the object. Patch *PatchRequest_Body_Patch } @@ -5316,6 +5344,7 @@ func (b0 PatchRequest_Body_builder) Build() *PatchRequest_Body { x.Address = b.Address x.NewAttributes = b.NewAttributes x.ReplaceAttributes = b.ReplaceAttributes + x.NewSplitHeader = b.NewSplitHeader x.Patch = b.Patch return m0 } @@ -5902,7 +5931,7 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, - 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xb3, 0x04, 0x0a, 0x0c, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfd, 0x04, 0x0a, 0x0c, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, @@ -5916,7 +5945,7 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xcf, 0x02, 0x0a, 0x04, 0x42, 0x6f, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x99, 0x03, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 
0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, @@ -5928,87 +5957,92 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x3f, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x1a, 0x59, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0c, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xa4, 0x02, 0x0a, 0x0d, - 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, - 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, - 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x48, 0x0a, 0x10, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x52, 0x0e, 0x6e, 0x65, 0x77, 0x53, 0x70, + 0x6c, 0x69, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x05, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x50, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x59, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 
0x61, + 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xa4, 0x02, 0x0a, 0x0d, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x49, 0x64, 0x32, 0xd4, 0x05, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, - 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x03, 0x50, 0x75, - 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x44, 0x52, 
0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x32, 0xd4, 0x05, 0x0a, + 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, + 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, + 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x4b, 0x0a, 0x06, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x04, 0x48, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x12, 0x4b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, - 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, - 0x04, 0x48, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, - 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, - 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x1f, + 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 
0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x5d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x09, 0x50, 0x75, 0x74, 0x53, 0x69, - 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, - 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, - 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, - 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, - 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, - 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x42, 0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, - 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, - 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, - 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x08, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, + 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, + 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x53, 0x0a, + 0x08, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 
0x12, 0x5d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x54, 0x0a, 0x09, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x22, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x28, 0x01, 0x42, 0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, + 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, + 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x70, 0xe8, 0x07, } var file_api_object_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 42) @@ -6071,6 +6105,7 @@ var file_api_object_grpc_service_proto_goTypes = []any{ (grpc1.ChecksumType)(0), // 55: neo.fs.v2.refs.ChecksumType (*Object)(nil), // 56: neo.fs.v2.object.Object (*Header_Attribute)(nil), // 57: neo.fs.v2.object.Header.Attribute + (*Header_Split)(nil), // 58: neo.fs.v2.object.Header.Split } var file_api_object_grpc_service_proto_depIdxs = []int32{ 20, // 0: neo.fs.v2.object.GetRequest.body:type_name -> neo.fs.v2.object.GetRequest.Body @@ -6163,32 +6198,33 @@ var file_api_object_grpc_service_proto_depIdxs = []int32{ 56, // 87: neo.fs.v2.object.PutSingleRequest.Body.object:type_name -> neo.fs.v2.object.Object 48, // 88: neo.fs.v2.object.PatchRequest.Body.address:type_name -> neo.fs.v2.refs.Address 57, // 89: neo.fs.v2.object.PatchRequest.Body.new_attributes:type_name -> neo.fs.v2.object.Header.Attribute - 40, // 90: neo.fs.v2.object.PatchRequest.Body.patch:type_name -> neo.fs.v2.object.PatchRequest.Body.Patch - 11, // 91: neo.fs.v2.object.PatchRequest.Body.Patch.source_range:type_name -> neo.fs.v2.object.Range - 51, // 92: neo.fs.v2.object.PatchResponse.Body.object_id:type_name -> neo.fs.v2.refs.ObjectID - 0, // 93: neo.fs.v2.object.ObjectService.Get:input_type 
-> neo.fs.v2.object.GetRequest - 2, // 94: neo.fs.v2.object.ObjectService.Put:input_type -> neo.fs.v2.object.PutRequest - 4, // 95: neo.fs.v2.object.ObjectService.Delete:input_type -> neo.fs.v2.object.DeleteRequest - 6, // 96: neo.fs.v2.object.ObjectService.Head:input_type -> neo.fs.v2.object.HeadRequest - 9, // 97: neo.fs.v2.object.ObjectService.Search:input_type -> neo.fs.v2.object.SearchRequest - 12, // 98: neo.fs.v2.object.ObjectService.GetRange:input_type -> neo.fs.v2.object.GetRangeRequest - 14, // 99: neo.fs.v2.object.ObjectService.GetRangeHash:input_type -> neo.fs.v2.object.GetRangeHashRequest - 16, // 100: neo.fs.v2.object.ObjectService.PutSingle:input_type -> neo.fs.v2.object.PutSingleRequest - 18, // 101: neo.fs.v2.object.ObjectService.Patch:input_type -> neo.fs.v2.object.PatchRequest - 1, // 102: neo.fs.v2.object.ObjectService.Get:output_type -> neo.fs.v2.object.GetResponse - 3, // 103: neo.fs.v2.object.ObjectService.Put:output_type -> neo.fs.v2.object.PutResponse - 5, // 104: neo.fs.v2.object.ObjectService.Delete:output_type -> neo.fs.v2.object.DeleteResponse - 8, // 105: neo.fs.v2.object.ObjectService.Head:output_type -> neo.fs.v2.object.HeadResponse - 10, // 106: neo.fs.v2.object.ObjectService.Search:output_type -> neo.fs.v2.object.SearchResponse - 13, // 107: neo.fs.v2.object.ObjectService.GetRange:output_type -> neo.fs.v2.object.GetRangeResponse - 15, // 108: neo.fs.v2.object.ObjectService.GetRangeHash:output_type -> neo.fs.v2.object.GetRangeHashResponse - 17, // 109: neo.fs.v2.object.ObjectService.PutSingle:output_type -> neo.fs.v2.object.PutSingleResponse - 19, // 110: neo.fs.v2.object.ObjectService.Patch:output_type -> neo.fs.v2.object.PatchResponse - 102, // [102:111] is the sub-list for method output_type - 93, // [93:102] is the sub-list for method input_type - 93, // [93:93] is the sub-list for extension type_name - 93, // [93:93] is the sub-list for extension extendee - 0, // [0:93] is the sub-list for field type_name + 58, // 90: neo.fs.v2.object.PatchRequest.Body.new_split_header:type_name -> neo.fs.v2.object.Header.Split + 40, // 91: neo.fs.v2.object.PatchRequest.Body.patch:type_name -> neo.fs.v2.object.PatchRequest.Body.Patch + 11, // 92: neo.fs.v2.object.PatchRequest.Body.Patch.source_range:type_name -> neo.fs.v2.object.Range + 51, // 93: neo.fs.v2.object.PatchResponse.Body.object_id:type_name -> neo.fs.v2.refs.ObjectID + 0, // 94: neo.fs.v2.object.ObjectService.Get:input_type -> neo.fs.v2.object.GetRequest + 2, // 95: neo.fs.v2.object.ObjectService.Put:input_type -> neo.fs.v2.object.PutRequest + 4, // 96: neo.fs.v2.object.ObjectService.Delete:input_type -> neo.fs.v2.object.DeleteRequest + 6, // 97: neo.fs.v2.object.ObjectService.Head:input_type -> neo.fs.v2.object.HeadRequest + 9, // 98: neo.fs.v2.object.ObjectService.Search:input_type -> neo.fs.v2.object.SearchRequest + 12, // 99: neo.fs.v2.object.ObjectService.GetRange:input_type -> neo.fs.v2.object.GetRangeRequest + 14, // 100: neo.fs.v2.object.ObjectService.GetRangeHash:input_type -> neo.fs.v2.object.GetRangeHashRequest + 16, // 101: neo.fs.v2.object.ObjectService.PutSingle:input_type -> neo.fs.v2.object.PutSingleRequest + 18, // 102: neo.fs.v2.object.ObjectService.Patch:input_type -> neo.fs.v2.object.PatchRequest + 1, // 103: neo.fs.v2.object.ObjectService.Get:output_type -> neo.fs.v2.object.GetResponse + 3, // 104: neo.fs.v2.object.ObjectService.Put:output_type -> neo.fs.v2.object.PutResponse + 5, // 105: neo.fs.v2.object.ObjectService.Delete:output_type -> neo.fs.v2.object.DeleteResponse + 8, // 106: 
neo.fs.v2.object.ObjectService.Head:output_type -> neo.fs.v2.object.HeadResponse + 10, // 107: neo.fs.v2.object.ObjectService.Search:output_type -> neo.fs.v2.object.SearchResponse + 13, // 108: neo.fs.v2.object.ObjectService.GetRange:output_type -> neo.fs.v2.object.GetRangeResponse + 15, // 109: neo.fs.v2.object.ObjectService.GetRangeHash:output_type -> neo.fs.v2.object.GetRangeHashResponse + 17, // 110: neo.fs.v2.object.ObjectService.PutSingle:output_type -> neo.fs.v2.object.PutSingleResponse + 19, // 111: neo.fs.v2.object.ObjectService.Patch:output_type -> neo.fs.v2.object.PatchResponse + 103, // [103:112] is the sub-list for method output_type + 94, // [94:103] is the sub-list for method input_type + 94, // [94:94] is the sub-list for extension type_name + 94, // [94:94] is the sub-list for extension extendee + 0, // [0:94] is the sub-list for field type_name } func init() { file_api_object_grpc_service_proto_init() } diff --git a/api/object/grpc/service_protoopaque.pb.go b/api/object/grpc/service_protoopaque.pb.go index 3a389a18..8c74ef04 100644 --- a/api/object/grpc/service_protoopaque.pb.go +++ b/api/object/grpc/service_protoopaque.pb.go @@ -5050,6 +5050,7 @@ type PatchRequest_Body struct { xxx_hidden_Address *grpc1.Address `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` xxx_hidden_NewAttributes *[]*Header_Attribute `protobuf:"bytes,2,rep,name=new_attributes,json=newAttributes" json:"new_attributes,omitempty"` xxx_hidden_ReplaceAttributes bool `protobuf:"varint,3,opt,name=replace_attributes,json=replaceAttributes" json:"replace_attributes,omitempty"` + xxx_hidden_NewSplitHeader *Header_Split `protobuf:"bytes,5,opt,name=new_split_header,json=newSplitHeader" json:"new_split_header,omitempty"` xxx_hidden_Patch *PatchRequest_Body_Patch `protobuf:"bytes,4,opt,name=patch" json:"patch,omitempty"` XXX_raceDetectHookData protoimpl.RaceDetectHookData XXX_presence [1]uint32 @@ -5105,6 +5106,13 @@ func (x *PatchRequest_Body) GetReplaceAttributes() bool { return false } +func (x *PatchRequest_Body) GetNewSplitHeader() *Header_Split { + if x != nil { + return x.xxx_hidden_NewSplitHeader + } + return nil +} + func (x *PatchRequest_Body) GetPatch() *PatchRequest_Body_Patch { if x != nil { return x.xxx_hidden_Patch @@ -5122,7 +5130,11 @@ func (x *PatchRequest_Body) SetNewAttributes(v []*Header_Attribute) { func (x *PatchRequest_Body) SetReplaceAttributes(v bool) { x.xxx_hidden_ReplaceAttributes = v - protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 4) + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 5) +} + +func (x *PatchRequest_Body) SetNewSplitHeader(v *Header_Split) { + x.xxx_hidden_NewSplitHeader = v } func (x *PatchRequest_Body) SetPatch(v *PatchRequest_Body_Patch) { @@ -5143,6 +5155,13 @@ func (x *PatchRequest_Body) HasReplaceAttributes() bool { return protoimpl.X.Present(&(x.XXX_presence[0]), 2) } +func (x *PatchRequest_Body) HasNewSplitHeader() bool { + if x == nil { + return false + } + return x.xxx_hidden_NewSplitHeader != nil +} + func (x *PatchRequest_Body) HasPatch() bool { if x == nil { return false @@ -5159,6 +5178,10 @@ func (x *PatchRequest_Body) ClearReplaceAttributes() { x.xxx_hidden_ReplaceAttributes = false } +func (x *PatchRequest_Body) ClearNewSplitHeader() { + x.xxx_hidden_NewSplitHeader = nil +} + func (x *PatchRequest_Body) ClearPatch() { x.xxx_hidden_Patch = nil } @@ -5179,6 +5202,9 @@ type PatchRequest_Body_builder struct { // merged. If the incoming `new_attributes` list contains already existing // key, then it just replaces it while merging the lists. 
ReplaceAttributes *bool + // New split header for the object. This defines how the object will relate + // to other objects in a split operation. + NewSplitHeader *Header_Split // The patch that is applied for the object. Patch *PatchRequest_Body_Patch } @@ -5190,9 +5216,10 @@ func (b0 PatchRequest_Body_builder) Build() *PatchRequest_Body { x.xxx_hidden_Address = b.Address x.xxx_hidden_NewAttributes = &b.NewAttributes if b.ReplaceAttributes != nil { - protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 4) + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 5) x.xxx_hidden_ReplaceAttributes = *b.ReplaceAttributes } + x.xxx_hidden_NewSplitHeader = b.NewSplitHeader x.xxx_hidden_Patch = b.Patch return m0 } @@ -5779,7 +5806,7 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, - 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xb3, 0x04, 0x0a, 0x0c, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfd, 0x04, 0x0a, 0x0c, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, @@ -5793,7 +5820,7 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xcf, 0x02, 0x0a, 0x04, 0x42, 0x6f, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x99, 0x03, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, @@ -5805,87 +5832,92 @@ var file_api_object_grpc_service_proto_rawDesc = []byte{ 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x3f, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x1a, 0x59, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0c, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xa4, 0x02, 0x0a, 0x0d, - 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, - 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, - 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x48, 0x0a, 0x10, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x52, 0x0e, 0x6e, 0x65, 0x77, 0x53, 0x70, + 0x6c, 0x69, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x05, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x50, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x59, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xa4, 0x02, 0x0a, 0x0d, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 
0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x49, 0x64, 0x32, 0xd4, 0x05, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, - 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x03, 0x50, 0x75, - 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x44, 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x32, 0xd4, 0x05, 0x0a, + 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, + 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, + 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x4b, 0x0a, 0x06, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x04, 0x48, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x12, 0x4b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, - 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, - 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, - 0x04, 0x48, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, - 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, - 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x1f, + 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x5d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x09, 0x50, 0x75, 0x74, 0x53, 0x69, - 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, - 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, - 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, - 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, - 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, - 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x42, 0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, - 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, - 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, - 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x08, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, + 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, + 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x53, 0x0a, + 0x08, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x12, 0x5d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x54, 0x0a, 0x09, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x22, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 
0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x28, 0x01, 0x42, 0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, + 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, + 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x70, 0xe8, 0x07, } var file_api_object_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 42) @@ -5948,6 +5980,7 @@ var file_api_object_grpc_service_proto_goTypes = []any{ (grpc1.ChecksumType)(0), // 55: neo.fs.v2.refs.ChecksumType (*Object)(nil), // 56: neo.fs.v2.object.Object (*Header_Attribute)(nil), // 57: neo.fs.v2.object.Header.Attribute + (*Header_Split)(nil), // 58: neo.fs.v2.object.Header.Split } var file_api_object_grpc_service_proto_depIdxs = []int32{ 20, // 0: neo.fs.v2.object.GetRequest.body:type_name -> neo.fs.v2.object.GetRequest.Body @@ -6040,32 +6073,33 @@ var file_api_object_grpc_service_proto_depIdxs = []int32{ 56, // 87: neo.fs.v2.object.PutSingleRequest.Body.object:type_name -> neo.fs.v2.object.Object 48, // 88: neo.fs.v2.object.PatchRequest.Body.address:type_name -> neo.fs.v2.refs.Address 57, // 89: neo.fs.v2.object.PatchRequest.Body.new_attributes:type_name -> neo.fs.v2.object.Header.Attribute - 40, // 90: neo.fs.v2.object.PatchRequest.Body.patch:type_name -> neo.fs.v2.object.PatchRequest.Body.Patch - 11, // 91: neo.fs.v2.object.PatchRequest.Body.Patch.source_range:type_name -> neo.fs.v2.object.Range - 51, // 92: neo.fs.v2.object.PatchResponse.Body.object_id:type_name -> neo.fs.v2.refs.ObjectID - 0, // 93: neo.fs.v2.object.ObjectService.Get:input_type -> neo.fs.v2.object.GetRequest - 2, // 94: neo.fs.v2.object.ObjectService.Put:input_type -> neo.fs.v2.object.PutRequest - 4, // 95: neo.fs.v2.object.ObjectService.Delete:input_type -> neo.fs.v2.object.DeleteRequest - 6, // 96: neo.fs.v2.object.ObjectService.Head:input_type -> neo.fs.v2.object.HeadRequest - 9, // 97: neo.fs.v2.object.ObjectService.Search:input_type -> neo.fs.v2.object.SearchRequest - 12, // 98: neo.fs.v2.object.ObjectService.GetRange:input_type -> neo.fs.v2.object.GetRangeRequest - 14, // 99: neo.fs.v2.object.ObjectService.GetRangeHash:input_type -> neo.fs.v2.object.GetRangeHashRequest - 16, // 100: neo.fs.v2.object.ObjectService.PutSingle:input_type -> neo.fs.v2.object.PutSingleRequest - 18, // 101: neo.fs.v2.object.ObjectService.Patch:input_type -> neo.fs.v2.object.PatchRequest - 1, // 102: neo.fs.v2.object.ObjectService.Get:output_type -> neo.fs.v2.object.GetResponse - 3, // 103: neo.fs.v2.object.ObjectService.Put:output_type -> neo.fs.v2.object.PutResponse - 5, // 104: neo.fs.v2.object.ObjectService.Delete:output_type -> neo.fs.v2.object.DeleteResponse - 8, // 105: neo.fs.v2.object.ObjectService.Head:output_type -> neo.fs.v2.object.HeadResponse - 10, // 106: neo.fs.v2.object.ObjectService.Search:output_type -> neo.fs.v2.object.SearchResponse 
- 13, // 107: neo.fs.v2.object.ObjectService.GetRange:output_type -> neo.fs.v2.object.GetRangeResponse - 15, // 108: neo.fs.v2.object.ObjectService.GetRangeHash:output_type -> neo.fs.v2.object.GetRangeHashResponse - 17, // 109: neo.fs.v2.object.ObjectService.PutSingle:output_type -> neo.fs.v2.object.PutSingleResponse - 19, // 110: neo.fs.v2.object.ObjectService.Patch:output_type -> neo.fs.v2.object.PatchResponse - 102, // [102:111] is the sub-list for method output_type - 93, // [93:102] is the sub-list for method input_type - 93, // [93:93] is the sub-list for extension type_name - 93, // [93:93] is the sub-list for extension extendee - 0, // [0:93] is the sub-list for field type_name + 58, // 90: neo.fs.v2.object.PatchRequest.Body.new_split_header:type_name -> neo.fs.v2.object.Header.Split + 40, // 91: neo.fs.v2.object.PatchRequest.Body.patch:type_name -> neo.fs.v2.object.PatchRequest.Body.Patch + 11, // 92: neo.fs.v2.object.PatchRequest.Body.Patch.source_range:type_name -> neo.fs.v2.object.Range + 51, // 93: neo.fs.v2.object.PatchResponse.Body.object_id:type_name -> neo.fs.v2.refs.ObjectID + 0, // 94: neo.fs.v2.object.ObjectService.Get:input_type -> neo.fs.v2.object.GetRequest + 2, // 95: neo.fs.v2.object.ObjectService.Put:input_type -> neo.fs.v2.object.PutRequest + 4, // 96: neo.fs.v2.object.ObjectService.Delete:input_type -> neo.fs.v2.object.DeleteRequest + 6, // 97: neo.fs.v2.object.ObjectService.Head:input_type -> neo.fs.v2.object.HeadRequest + 9, // 98: neo.fs.v2.object.ObjectService.Search:input_type -> neo.fs.v2.object.SearchRequest + 12, // 99: neo.fs.v2.object.ObjectService.GetRange:input_type -> neo.fs.v2.object.GetRangeRequest + 14, // 100: neo.fs.v2.object.ObjectService.GetRangeHash:input_type -> neo.fs.v2.object.GetRangeHashRequest + 16, // 101: neo.fs.v2.object.ObjectService.PutSingle:input_type -> neo.fs.v2.object.PutSingleRequest + 18, // 102: neo.fs.v2.object.ObjectService.Patch:input_type -> neo.fs.v2.object.PatchRequest + 1, // 103: neo.fs.v2.object.ObjectService.Get:output_type -> neo.fs.v2.object.GetResponse + 3, // 104: neo.fs.v2.object.ObjectService.Put:output_type -> neo.fs.v2.object.PutResponse + 5, // 105: neo.fs.v2.object.ObjectService.Delete:output_type -> neo.fs.v2.object.DeleteResponse + 8, // 106: neo.fs.v2.object.ObjectService.Head:output_type -> neo.fs.v2.object.HeadResponse + 10, // 107: neo.fs.v2.object.ObjectService.Search:output_type -> neo.fs.v2.object.SearchResponse + 13, // 108: neo.fs.v2.object.ObjectService.GetRange:output_type -> neo.fs.v2.object.GetRangeResponse + 15, // 109: neo.fs.v2.object.ObjectService.GetRangeHash:output_type -> neo.fs.v2.object.GetRangeHashResponse + 17, // 110: neo.fs.v2.object.ObjectService.PutSingle:output_type -> neo.fs.v2.object.PutSingleResponse + 19, // 111: neo.fs.v2.object.ObjectService.Patch:output_type -> neo.fs.v2.object.PatchResponse + 103, // [103:112] is the sub-list for method output_type + 94, // [94:103] is the sub-list for method input_type + 94, // [94:94] is the sub-list for extension type_name + 94, // [94:94] is the sub-list for extension extendee + 0, // [0:94] is the sub-list for field type_name } func init() { file_api_object_grpc_service_proto_init() } diff --git a/api/object/marshal.go b/api/object/marshal.go index 82e265b9..2ed888cb 100644 --- a/api/object/marshal.go +++ b/api/object/marshal.go @@ -136,10 +136,11 @@ const ( patchRequestBodyPatchRangeField = 1 patchRequestBodyPatchChunkField = 2 - patchRequestBodyAddrField = 1 - patchRequestBodyNewAttrsField = 2 - 
patchRequestBodyReplaceAttrField = 3 - patchRequestBodyPatchField = 4 + patchRequestBodyAddrField = 1 + patchRequestBodyNewAttrsField = 2 + patchRequestBodyReplaceAttrField = 3 + patchRequestBodyPatchField = 4 + patchRequestBodyNewSplitHeaderField = 5 patchResponseBodyObjectIDField = 1 ) @@ -1372,7 +1373,8 @@ func (r *PatchRequestBody) StableMarshal(buf []byte) []byte { offset += proto.NestedStructureMarshal(patchRequestBodyNewAttrsField, buf[offset:], &r.newAttributes[i]) } offset += proto.BoolMarshal(patchRequestBodyReplaceAttrField, buf[offset:], r.replaceAttributes) - proto.NestedStructureMarshal(patchRequestBodyPatchField, buf[offset:], r.patch) + offset += proto.NestedStructureMarshal(patchRequestBodyPatchField, buf[offset:], r.patch) + proto.NestedStructureMarshal(patchRequestBodyNewSplitHeaderField, buf[offset:], r.newSplitHeader) return buf } @@ -1389,6 +1391,7 @@ func (r *PatchRequestBody) StableSize() int { } size += proto.BoolSize(patchRequestBodyReplaceAttrField, r.replaceAttributes) size += proto.NestedStructureSize(patchRequestBodyPatchField, r.patch) + size += proto.NestedStructureSize(patchRequestBodyNewSplitHeaderField, r.newSplitHeader) return size } diff --git a/api/object/types.go b/api/object/types.go index 537fb029..57a93cee 100644 --- a/api/object/types.go +++ b/api/object/types.go @@ -360,6 +360,8 @@ type PatchRequestBody struct { newAttributes []Attribute + newSplitHeader *SplitHeader + replaceAttributes bool patch *PatchRequestBodyPatch @@ -1591,6 +1593,14 @@ func (r *PatchRequestBody) SetReplaceAttributes(replace bool) { r.replaceAttributes = replace } +func (r *PatchRequestBody) SetNewSplitHeader(newSplitHeader *SplitHeader) { + r.newSplitHeader = newSplitHeader +} + +func (r *PatchRequestBody) GetNewSplitHeader() *SplitHeader { + return r.newSplitHeader +} + func (r *PatchRequestBody) GetPatch() *PatchRequestBodyPatch { if r != nil { return r.patch diff --git a/api/rpc/client/flows.go b/api/rpc/client/flows.go index 671c6795..2a945b44 100644 --- a/api/rpc/client/flows.go +++ b/api/rpc/client/flows.go @@ -12,18 +12,20 @@ import ( // SendUnary initializes communication session by RPC info, performs unary RPC // and closes the session. func SendUnary(cli *Client, info common.CallMethodInfo, req, resp message.Message, opts ...CallOption) error { - rw, err := cli.Init(info, opts...) + rw, err := cli.initInternal(info, opts...) if err != nil { return err } err = rw.WriteMessage(req) if err != nil { + rw.cancel() return err } err = rw.ReadMessage(resp) if err != nil { + rw.cancel() return err } @@ -38,18 +40,28 @@ type MessageWriterCloser interface { } type clientStreamWriterCloser struct { - MessageReadWriter - + sw *streamWrapper resp message.Message } +// WriteMessage implements MessageWriterCloser. +func (c *clientStreamWriterCloser) WriteMessage(m message.Message) error { + return c.sw.WriteMessage(m) +} + func (c *clientStreamWriterCloser) Close() error { - err := c.MessageReadWriter.Close() + err := c.sw.closeSend() if err != nil { + c.sw.cancel() return err } - return c.ReadMessage(c.resp) + if err = c.sw.ReadMessage(c.resp); err != nil { + c.sw.cancel() + return err + } + + return c.sw.Close() } // OpenClientStream initializes communication session by RPC info, opens client-side stream @@ -57,14 +69,14 @@ func (c *clientStreamWriterCloser) Close() error { // // All stream writes must be performed before the closing. Close must be called once. 
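//
// A hedged usage sketch of the client-stream flow (cli, info, requests and resp are assumed
// to be prepared by the caller; this illustrates the contract, it is not a prescribed sequence):
//
//	w, err := OpenClientStream(cli, info, resp)
//	if err != nil {
//		return err
//	}
//	for _, req := range requests {
//		if err := w.WriteMessage(req); err != nil {
//			return err
//		}
//	}
//	// Close sends CloseSend, reads the single response into resp and
//	// releases the underlying stream, canceling it on any failure.
//	return w.Close()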
func OpenClientStream(cli *Client, info common.CallMethodInfo, resp message.Message, opts ...CallOption) (MessageWriterCloser, error) { - rw, err := cli.Init(info, opts...) + rw, err := cli.initInternal(info, opts...) if err != nil { return nil, err } return &clientStreamWriterCloser{ - MessageReadWriter: rw, - resp: resp, + sw: rw, + resp: resp, }, nil } @@ -76,7 +88,7 @@ type MessageReaderCloser interface { } type serverStreamReaderCloser struct { - rw MessageReadWriter + rw *streamWrapper once sync.Once @@ -91,11 +103,15 @@ func (s *serverStreamReaderCloser) ReadMessage(msg message.Message) error { }) if err != nil { + s.rw.cancel() return err } err = s.rw.ReadMessage(msg) if !errors.Is(err, io.EOF) { + if err != nil { + s.rw.cancel() + } return err } @@ -112,7 +128,7 @@ func (s *serverStreamReaderCloser) ReadMessage(msg message.Message) error { // // All stream reads must be performed before the closing. Close must be called once. func OpenServerStream(cli *Client, info common.CallMethodInfo, req message.Message, opts ...CallOption) (MessageReader, error) { - rw, err := cli.Init(info, opts...) + rw, err := cli.initInternal(info, opts...) if err != nil { return nil, err } diff --git a/api/rpc/client/init.go b/api/rpc/client/init.go index 95e9301a..834f5dad 100644 --- a/api/rpc/client/init.go +++ b/api/rpc/client/init.go @@ -41,6 +41,10 @@ type MessageReadWriter interface { // Init initiates a messaging session and returns the interface for message transmitting. func (c *Client) Init(info common.CallMethodInfo, opts ...CallOption) (MessageReadWriter, error) { + return c.initInternal(info, opts...) +} + +func (c *Client) initInternal(info common.CallMethodInfo, opts ...CallOption) (*streamWrapper, error) { prm := defaultCallParameters() for _, opt := range opts { @@ -61,7 +65,13 @@ func (c *Client) Init(info common.CallMethodInfo, opts ...CallOption) (MessageRe // would propagate to all subsequent read/write operations on the opened stream, // which is not desired for the stream's lifecycle management. 
dialTimeoutTimer := time.NewTimer(c.dialTimeout) - defer dialTimeoutTimer.Stop() + defer func() { + dialTimeoutTimer.Stop() + select { + case <-dialTimeoutTimer.C: + default: + } + }() type newStreamRes struct { stream grpc.ClientStream @@ -91,7 +101,7 @@ func (c *Client) Init(info common.CallMethodInfo, opts ...CallOption) (MessageRe if res.stream != nil && res.err == nil { _ = res.stream.CloseSend() } - return nil, context.Canceled + return nil, context.DeadlineExceeded case res = <-newStreamCh: } diff --git a/api/rpc/client/options.go b/api/rpc/client/options.go index 5711cd4d..df6f6ed8 100644 --- a/api/rpc/client/options.go +++ b/api/rpc/client/options.go @@ -38,6 +38,7 @@ func (c *cfg) initDefault() { c.rwTimeout = defaultRWTimeout c.grpcDialOpts = []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDisableServiceConfig(), } } diff --git a/api/rpc/client/stream_wrapper.go b/api/rpc/client/stream_wrapper.go index 4c7bb1f6..85d5ad51 100644 --- a/api/rpc/client/stream_wrapper.go +++ b/api/rpc/client/stream_wrapper.go @@ -34,10 +34,15 @@ func (w streamWrapper) WriteMessage(m message.Message) error { }) } -func (w *streamWrapper) Close() error { +func (w *streamWrapper) closeSend() error { return w.withTimeout(w.ClientStream.CloseSend) } +func (w *streamWrapper) Close() error { + w.cancel() + return nil +} + func (w *streamWrapper) withTimeout(closure func() error) error { ch := make(chan error, 1) go func() { @@ -50,6 +55,10 @@ func (w *streamWrapper) withTimeout(closure func() error) error { select { case err := <-ch: tt.Stop() + select { + case <-tt.C: + default: + } return err case <-tt.C: w.cancel() diff --git a/api/status/grpc/types.pb.go b/api/status/grpc/types.pb.go index e44ae18b..a23fc090 100644 --- a/api/status/grpc/types.pb.go +++ b/api/status/grpc/types.pb.go @@ -144,6 +144,9 @@ const ( // request parameter as the client sent it incorrectly, then this code should // be used. CommonFail_INVALID_ARGUMENT CommonFail = 4 + // [**1029**] Resource exhausted failure. If the operation cannot be performed + // due to a lack of resources. + CommonFail_RESOURCE_EXHAUSTED CommonFail = 5 ) // Enum value maps for CommonFail. 
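// Illustrative note (not generated code): status codes are globalized per section, and the
// COMMON_FAIL section is assumed to start at global code 1024, so the new section-local
// code 5 corresponds to the documented global value 1024 + 5 = 1029:
//
//	global := 1024 + int32(CommonFail_RESOURCE_EXHAUSTED) // assumed section offset; global == 1029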
@@ -154,6 +157,7 @@ var ( 2: "SIGNATURE_VERIFICATION_FAIL", 3: "NODE_UNDER_MAINTENANCE", 4: "INVALID_ARGUMENT", + 5: "RESOURCE_EXHAUSTED", } CommonFail_value = map[string]int32{ "INTERNAL": 0, @@ -161,6 +165,7 @@ var ( "SIGNATURE_VERIFICATION_FAIL": 2, "NODE_UNDER_MAINTENANCE": 3, "INVALID_ARGUMENT": 4, + "RESOURCE_EXHAUSTED": 5, } ) @@ -654,7 +659,7 @@ var file_api_status_grpc_types_proto_rawDesc = []byte{ 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x05, 0x2a, 0x11, 0x0a, 0x07, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x85, 0x01, 0x0a, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x57, 0x52, 0x4f, 0x4e, 0x47, 0x5f, 0x4d, 0x41, 0x47, 0x49, 0x43, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, @@ -663,34 +668,35 @@ var file_api_status_grpc_types_proto_rawDesc = []byte{ 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, - 0x4e, 0x54, 0x10, 0x04, 0x2a, 0x88, 0x01, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, - 0x45, 0x44, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x4f, 0x4e, - 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, - 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x52, 0x45, - 0x41, 0x44, 0x59, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, - 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x05, 0x2a, - 0x55, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x13, - 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, - 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x41, 0x43, 0x4c, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, - 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x31, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, - 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x2b, 0x0a, 0x0a, 0x41, 0x50, 0x45, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x50, 0x45, 0x5f, 0x4d, - 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x42, 
0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, - 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, - 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0xaa, 0x02, 0x1a, - 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, + 0x4e, 0x54, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x05, 0x2a, 0x88, 0x01, 0x0a, + 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, + 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, + 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, + 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x4f, 0x42, 0x4a, + 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x52, 0x45, 0x4d, 0x4f, + 0x56, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x05, 0x2a, 0x55, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, + 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, + 0x0e, 0x45, 0x41, 0x43, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, + 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x31, + 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x4b, + 0x45, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, + 0x01, 0x2a, 0x2b, 0x0a, 0x0a, 0x41, 0x50, 0x45, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, + 0x1d, 0x0a, 0x19, 0x41, 0x50, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x42, 0x62, + 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, + 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, + 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, } var 
file_api_status_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 7) diff --git a/api/status/grpc/types_protoopaque.pb.go b/api/status/grpc/types_protoopaque.pb.go index d1877abe..16ae0e94 100644 --- a/api/status/grpc/types_protoopaque.pb.go +++ b/api/status/grpc/types_protoopaque.pb.go @@ -144,6 +144,9 @@ const ( // request parameter as the client sent it incorrectly, then this code should // be used. CommonFail_INVALID_ARGUMENT CommonFail = 4 + // [**1029**] Resource exhausted failure. If the operation cannot be performed + // due to a lack of resources. + CommonFail_RESOURCE_EXHAUSTED CommonFail = 5 ) // Enum value maps for CommonFail. @@ -154,6 +157,7 @@ var ( 2: "SIGNATURE_VERIFICATION_FAIL", 3: "NODE_UNDER_MAINTENANCE", 4: "INVALID_ARGUMENT", + 5: "RESOURCE_EXHAUSTED", } CommonFail_value = map[string]int32{ "INTERNAL": 0, @@ -161,6 +165,7 @@ var ( "SIGNATURE_VERIFICATION_FAIL": 2, "NODE_UNDER_MAINTENANCE": 3, "INVALID_ARGUMENT": 4, + "RESOURCE_EXHAUSTED": 5, } ) @@ -676,7 +681,7 @@ var file_api_status_grpc_types_proto_rawDesc = []byte{ 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x05, 0x2a, 0x11, 0x0a, 0x07, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x85, 0x01, 0x0a, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x57, 0x52, 0x4f, 0x4e, 0x47, 0x5f, 0x4d, 0x41, 0x47, 0x49, 0x43, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, @@ -685,34 +690,35 @@ var file_api_status_grpc_types_proto_rawDesc = []byte{ 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, - 0x4e, 0x54, 0x10, 0x04, 0x2a, 0x88, 0x01, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, - 0x45, 0x44, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x4f, 0x4e, - 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, - 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x52, 0x45, - 0x41, 0x44, 0x59, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, - 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x05, 0x2a, - 0x55, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x13, - 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, - 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x41, 0x43, 0x4c, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, - 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 
0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x31, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, - 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x2b, 0x0a, 0x0a, 0x41, 0x50, 0x45, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x50, 0x45, 0x5f, 0x4d, - 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x42, 0x62, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, - 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, - 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0xaa, 0x02, 0x1a, - 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, + 0x4e, 0x54, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x05, 0x2a, 0x88, 0x01, 0x0a, + 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, + 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, + 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, + 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x4f, 0x42, 0x4a, + 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x52, 0x45, 0x4d, 0x4f, + 0x56, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x05, 0x2a, 0x55, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, + 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, + 0x0e, 0x45, 0x41, 0x43, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, + 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x31, + 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x4b, + 0x45, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, + 0x01, 0x2a, 0x2b, 0x0a, 0x0a, 0x41, 0x50, 0x45, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, + 0x1d, 0x0a, 0x19, 0x41, 0x50, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x42, 0x62, + 0x5a, 0x43, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 
0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, + 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, + 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, } var file_api_status_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 7) diff --git a/api/status/status.go b/api/status/status.go index 53d361e3..7fe91114 100644 --- a/api/status/status.go +++ b/api/status/status.go @@ -65,6 +65,8 @@ const ( NodeUnderMaintenance // InvalidArgument is a local Code value for INVALID_ARGUMENT status. InvalidArgument + // ResourceExhausted is a local Code value for RESOURCE_EXHAUSTED status. + ResourceExhausted ) const ( diff --git a/bearer/bearer.go b/bearer/bearer.go index d1b77edd..3c4e0e91 100644 --- a/bearer/bearer.go +++ b/bearer/bearer.go @@ -313,15 +313,10 @@ func (b *Token) SetAPEOverride(v APEOverride) { b.apeOverrideSet = true } -// APEOverride returns APE override set by SetAPEOverride. -// -// Zero Token has zero APEOverride. -func (b *Token) APEOverride() APEOverride { - if b.apeOverrideSet { - return b.apeOverride - } - - return APEOverride{} +// APEOverride returns APE override set by SetAPEOverride and a flag that indicates whether override +// is set for the token. +func (b *Token) APEOverride() (override APEOverride, isSet bool) { + return b.apeOverride, b.apeOverrideSet } // SetImpersonate mark token as impersonate to consider token signer as request owner. diff --git a/bearer/bearer_test.go b/bearer/bearer_test.go index 650dc82c..cbba7c5e 100644 --- a/bearer/bearer_test.go +++ b/bearer/bearer_test.go @@ -93,7 +93,8 @@ func TestToken_SetAPEOverrides(t *testing.T) { val2 := filled require.NoError(t, val2.Unmarshal(val.Marshal())) - require.Zero(t, val2.APEOverride()) + _, isSet := val2.APEOverride() + require.False(t, isSet) val2 = filled @@ -101,14 +102,16 @@ func TestToken_SetAPEOverrides(t *testing.T) { require.NoError(t, err) require.NoError(t, val2.UnmarshalJSON(jd)) - require.Zero(t, val2.APEOverride()) + _, isSet = val2.APEOverride() + require.False(t, isSet) // set value tApe := bearertest.APEOverride() val.SetAPEOverride(tApe) - require.Equal(t, tApe, val.APEOverride()) + _, isSet = val.APEOverride() + require.True(t, isSet) val.WriteToV2(&m) require.NotNil(t, m.GetBody().GetAPEOverride()) @@ -117,7 +120,8 @@ func TestToken_SetAPEOverrides(t *testing.T) { val2 = filled require.NoError(t, val2.Unmarshal(val.Marshal())) - apeOverride := val2.APEOverride() + apeOverride, isSet := val2.APEOverride() + require.True(t, isSet) require.True(t, tokenAPEOverridesEqual(tApe.ToV2(), apeOverride.ToV2())) val2 = filled @@ -126,7 +130,8 @@ func TestToken_SetAPEOverrides(t *testing.T) { require.NoError(t, err) require.NoError(t, val2.UnmarshalJSON(jd)) - apeOverride = val.APEOverride() + apeOverride, isSet = val.APEOverride() + require.True(t, isSet) require.True(t, tokenAPEOverridesEqual(tApe.ToV2(), apeOverride.ToV2())) } diff --git a/client/object_patch.go b/client/object_patch.go index 6930644d..87033c52 100644 --- a/client/object_patch.go +++ b/client/object_patch.go @@ -26,11 +26,19 @@ import ( // usage is unsafe. 
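//
// A hedged sketch of the new header patching flow (ctx, patcher, attrs and splitHdr are
// assumed to be prepared by the caller):
//
//	ok := patcher.PatchHeader(ctx, PatchHeaderPrm{
//		NewAttributes:     attrs,    // []object.Attribute, optional
//		ReplaceAttributes: false,
//		NewSplitHeader:    splitHdr, // *object.SplitHeader
//	})
//	if !ok {
//		_, err := patcher.Close(ctx)
//		// err carries the failure reason
//	}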
type ObjectPatcher interface { // PatchAttributes patches attributes. Attributes can be patched no more than once, - // otherwise, the server returns an error. + // otherwise, the server returns an error. `PatchAttributes` and `PatchHeader` are mutually + // exclusive - only one method can be used. // // Result means success. Failure reason can be received via Close. PatchAttributes(ctx context.Context, newAttrs []object.Attribute, replace bool) bool + // PatchHeader patches object's header. Header can be patched no more than once, + // otherwise, the server returns an error. `PatchAttributes` and `PatchHeader` are mutually + // exclusive - only one method can be used. + // + // Result means success. Failure reason can be received via Close. + PatchHeader(ctx context.Context, prm PatchHeaderPrm) bool + // PatchPayload patches the object's payload. // // PatchPayload receives `payloadReader` and thus the payload of the patch is read and sent by chunks of @@ -60,6 +68,14 @@ type ObjectPatcher interface { Close(_ context.Context) (*ResObjectPatch, error) } +type PatchHeaderPrm struct { + NewSplitHeader *object.SplitHeader + + NewAttributes []object.Attribute + + ReplaceAttributes bool +} + // ResObjectPatch groups resulting values of ObjectPatch operation. type ResObjectPatch struct { statusRes @@ -163,6 +179,15 @@ func (x *objectPatcher) PatchAttributes(_ context.Context, newAttrs []object.Att }) } +func (x *objectPatcher) PatchHeader(_ context.Context, prm PatchHeaderPrm) bool { + return x.patch(&object.Patch{ + Address: x.addr, + NewAttributes: prm.NewAttributes, + ReplaceAttributes: prm.ReplaceAttributes, + NewSplitHeader: prm.NewSplitHeader, + }) +} + func (x *objectPatcher) PatchPayload(_ context.Context, rng *object.Range, payloadReader io.Reader) bool { offset := rng.GetOffset() diff --git a/client/object_patch_test.go b/client/object_patch_test.go index 63996b68..3e801f3a 100644 --- a/client/object_patch_test.go +++ b/client/object_patch_test.go @@ -177,7 +177,7 @@ func TestObjectPatcher(t *testing.T) { maxChunkLen: test.maxChunkLen, } - success := patcher.PatchAttributes(context.Background(), nil, false) + success := patcher.PatchHeader(context.Background(), PatchHeaderPrm{}) require.True(t, success) success = patcher.PatchPayload(context.Background(), test.rng, bytes.NewReader([]byte(test.patchPayload))) diff --git a/client/status/common.go b/client/status/common.go index 486bc72a..65c4ba31 100644 --- a/client/status/common.go +++ b/client/status/common.go @@ -296,3 +296,62 @@ func (x *InvalidArgument) SetMessage(v string) { func (x InvalidArgument) Message() string { return x.v2.Message() } + +// ResourceExhausted is a failure status indicating that +// the operation cannot be performed due to a lack of resources. +// Instances provide Status and StatusV2 interfaces. +type ResourceExhausted struct { + v2 status.Status +} + +const defaultResourceExhaustedMsg = "resource exhausted" + +// Error implements the error interface. +func (x *ResourceExhausted) Error() string { + msg := x.v2.Message() + if msg == "" { + msg = defaultResourceExhaustedMsg + } + + return errMessageStatusV2( + globalizeCodeV2(status.ResourceExhausted, status.GlobalizeCommonFail), + msg, + ) +} + +// implements local interface defined in FromStatusV2 func. +func (x *ResourceExhausted) fromStatusV2(st *status.Status) { + x.v2 = *st +} + +// ToStatusV2 implements StatusV2 interface method. +// If the value was returned by FromStatusV2, returns the source message. 
+// Otherwise, returns message with +// - code: RESOURCE_EXHAUSTED; +// - string message: written message via SetMessage or +// "resource exhausted" as a default message; +// - details: empty. +func (x ResourceExhausted) ToStatusV2() *status.Status { + x.v2.SetCode(globalizeCodeV2(status.ResourceExhausted, status.GlobalizeCommonFail)) + if x.v2.Message() == "" { + x.v2.SetMessage(defaultResourceExhaustedMsg) + } + + return &x.v2 +} + +// SetMessage writes invalid argument failure message. +// Message should be used for debug purposes only. +// +// See also Message. +func (x *ResourceExhausted) SetMessage(v string) { + x.v2.SetMessage(v) +} + +// Message returns status message. Zero status returns empty message. +// Message should be used for debug purposes only. +// +// See also SetMessage. +func (x ResourceExhausted) Message() string { + return x.v2.Message() +} diff --git a/client/status/common_test.go b/client/status/common_test.go index e55883e4..0c3d2d5d 100644 --- a/client/status/common_test.go +++ b/client/status/common_test.go @@ -167,3 +167,42 @@ func TestInvalidArgument(t *testing.T) { require.Equal(t, msg, stV2.Message()) }) } + +func TestResourceExhausted(t *testing.T) { + t.Run("default", func(t *testing.T) { + var st apistatus.ResourceExhausted + + require.Empty(t, st.Message()) + }) + + t.Run("custom message", func(t *testing.T) { + var st apistatus.ResourceExhausted + msg := "some message" + + st.SetMessage(msg) + + stV2 := st.ToStatusV2() + + require.Equal(t, msg, st.Message()) + require.Equal(t, msg, stV2.Message()) + }) + + t.Run("empty to V2", func(t *testing.T) { + var st apistatus.ResourceExhausted + + stV2 := st.ToStatusV2() + + require.Equal(t, "resource exhausted", stV2.Message()) + }) + + t.Run("non-empty to V2", func(t *testing.T) { + var st apistatus.ResourceExhausted + msg := "some other msg" + + st.SetMessage(msg) + + stV2 := st.ToStatusV2() + + require.Equal(t, msg, stV2.Message()) + }) +} diff --git a/client/status/v2.go b/client/status/v2.go index 95dfb8a4..6ee3d84a 100644 --- a/client/status/v2.go +++ b/client/status/v2.go @@ -80,6 +80,8 @@ func FromStatusV2(st *status.Status) Status { decoder = new(NodeUnderMaintenance) case status.InvalidArgument: decoder = new(InvalidArgument) + case status.ResourceExhausted: + decoder = new(ResourceExhausted) } case object.LocalizeFailStatus(&code): switch code { diff --git a/container/container.go b/container/container.go index ff63adb9..f10c3202 100644 --- a/container/container.go +++ b/container/container.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "errors" "fmt" + "iter" "strconv" "strings" "time" @@ -337,10 +338,41 @@ func (x Container) Attribute(key string) string { return "" } +// Attributes returns an iterator over all Container attributes. +// +// See also [Container.SetAttribute], [Container.UserAttributes]. +func (x Container) Attributes() iter.Seq2[string, string] { + return func(yield func(string, string) bool) { + attrs := x.v2.GetAttributes() + for i := range attrs { + if !yield(attrs[i].GetKey(), attrs[i].GetValue()) { + return + } + } + } +} + +// Attributes returns an iterator over all non-system Container attributes. +// +// See also [Container.SetAttribute], [Container.Attributes]. 
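+//
+// A hedged usage sketch (cnr is an assumed, already filled Container value):
+//
+//	for key, value := range cnr.UserAttributes() {
+//		fmt.Println(key, value) // attributes with the system prefix are filtered out
+//	}
+//
+//	all := maps.Collect(cnr.Attributes()) // all attributes, including system ones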
+func (x Container) UserAttributes() iter.Seq2[string, string] { + return func(yield func(string, string) bool) { + for key, value := range x.Attributes() { + if !strings.HasPrefix(key, container.SysAttributePrefix) { + if !yield(key, value) { + return + } + } + } + } +} + // IterateAttributes iterates over all Container attributes and passes them // into f. The handler MUST NOT be nil. // // See also SetAttribute, Attribute. +// +// Deprecated: use [Container.Attributes] instead. func (x Container) IterateAttributes(f func(key, val string)) { attrs := x.v2.GetAttributes() for i := range attrs { @@ -352,6 +384,8 @@ func (x Container) IterateAttributes(f func(key, val string)) { // into f. The handler MUST NOT be nil. // // See also SetAttribute, Attribute. +// +// Deprecated: use [Container.UserAttributes] instead. func (x Container) IterateUserAttributes(f func(key, val string)) { attrs := x.v2.GetAttributes() for _, attr := range attrs { diff --git a/container/container_test.go b/container/container_test.go index a66a866f..62422aba 100644 --- a/container/container_test.go +++ b/container/container_test.go @@ -2,6 +2,7 @@ package container_test import ( "crypto/sha256" + "maps" "strconv" "testing" "time" @@ -159,9 +160,9 @@ func TestContainer_Attribute(t *testing.T) { val.SetAttribute(attrKey2, attrVal2) var i int - val.IterateUserAttributes(func(key, val string) { + for range val.UserAttributes() { i++ - }) + } require.Equal(t, 1, i) var msg v2container.Container @@ -177,11 +178,7 @@ func TestContainer_Attribute(t *testing.T) { require.Equal(t, attrVal1, val2.Attribute(attrKey1)) require.Equal(t, attrVal2, val2.Attribute(attrKey2)) - m := map[string]string{} - - val2.IterateAttributes(func(key, val string) { - m[key] = val - }) + m := maps.Collect(val2.Attributes()) require.GreaterOrEqual(t, len(m), 2) require.Equal(t, attrVal1, m[attrKey1]) diff --git a/container/id/id.go b/container/id/id.go index 569968aa..1cbd60b5 100644 --- a/container/id/id.go +++ b/container/id/id.go @@ -3,6 +3,7 @@ package cid import ( "crypto/sha256" "fmt" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "github.com/mr-tron/base58" @@ -113,3 +114,9 @@ func (id *ID) DecodeString(s string) error { func (id ID) String() string { return id.EncodeToString() } + +// Cmp returns an integer comparing two base58 encoded container ID lexicographically. +// The result will be 0 if id1 == id2, -1 if id1 < id2, and +1 if id1 > id2. 
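+//
+// Cmp can be used directly as a comparison function for sorting, for example
+// (ids is an assumed []cid.ID slice):
+//
+//	slices.SortFunc(ids, cid.ID.Cmp)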
+func (id ID) Cmp(id2 ID) int { + return strings.Compare(id.EncodeToString(), id2.EncodeToString()) +} diff --git a/container/id/id_test.go b/container/id/id_test.go index 6f60d923..2bbd8b34 100644 --- a/container/id/id_test.go +++ b/container/id/id_test.go @@ -3,6 +3,8 @@ package cid_test import ( "crypto/rand" "crypto/sha256" + "slices" + "strings" "testing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" @@ -106,3 +108,17 @@ func TestID_Encode(t *testing.T) { require.Equal(t, emptyID, id.EncodeToString()) }) } + +func TestID_Cmp(t *testing.T) { + var arr []cid.ID + for i := 0; i < 3; i++ { + checksum := randSHA256Checksum() + arr = append(arr, cidtest.IDWithChecksum(checksum)) + } + + slices.SortFunc(arr, cid.ID.Cmp) + + for i := 1; i < len(arr); i++ { + require.NotEqual(t, strings.Compare(arr[i-1].EncodeToString(), arr[i].EncodeToString()), 1, "array is not sorted correctly") + } +} diff --git a/container/iterators_test.go b/container/iterators_test.go new file mode 100644 index 00000000..00522a5c --- /dev/null +++ b/container/iterators_test.go @@ -0,0 +1,95 @@ +package container_test + +import ( + "testing" + + containerAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "github.com/stretchr/testify/require" +) + +func TestContainer_Attributes(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var n container.Container + t.Run("attributes", func(t *testing.T) { + for range n.Attributes() { + t.Fatalf("handler is called, but it shouldn't") + } + }) + t.Run("user attributes", func(t *testing.T) { + for range n.UserAttributes() { + t.Fatalf("handler is called, but it shouldn't") + } + }) + }) + + var n container.Container + n.SetAttribute(containerAPI.SysAttributeName, "myname") + n.SetAttribute("key1", "value1") + n.SetAttribute("key2", "value2") + n.SetAttribute(containerAPI.SysAttributeZone, "test") + + t.Run("break", func(t *testing.T) { + t.Run("attributes", func(t *testing.T) { + var res [][2]string + for key, value := range n.Attributes() { + if key == "key2" { + break + } + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{{containerAPI.SysAttributeName, "myname"}, {"key1", "value1"}}, res) + }) + t.Run("user attributes", func(t *testing.T) { + var res [][2]string + for key, value := range n.UserAttributes() { + if key == "key2" { + break + } + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{{"key1", "value1"}}, res) + }) + }) + t.Run("continue", func(t *testing.T) { + t.Run("attributes", func(t *testing.T) { + var res [][2]string + for key, value := range n.Attributes() { + if key == "key2" { + continue + } + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{{containerAPI.SysAttributeName, "myname"}, {"key1", "value1"}, {containerAPI.SysAttributeZone, "test"}}, res) + }) + t.Run("user attributes", func(t *testing.T) { + var res [][2]string + for key, value := range n.UserAttributes() { + if key == "key2" { + continue + } + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{{"key1", "value1"}}, res) + }) + }) + t.Run("attributes", func(t *testing.T) { + var res [][2]string + for key, value := range n.Attributes() { + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{ + {containerAPI.SysAttributeName, "myname"}, + {"key1", "value1"}, + {"key2", "value2"}, + {containerAPI.SysAttributeZone, "test"}, + }, res) + }) + t.Run("user attributes", func(t *testing.T) { + var 
res [][2]string + for key, value := range n.UserAttributes() { + res = append(res, [2]string{key, value}) + } + require.Equal(t, [][2]string{{"key1", "value1"}, {"key2", "value2"}}, res) + }) +} diff --git a/go.mod b/go.mod index 1cca977b..76ef49eb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module git.frostfs.info/TrueCloudLab/frostfs-sdk-go -go 1.22 +go 1.23.0 require ( git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e @@ -14,11 +14,11 @@ require ( github.com/klauspost/reedsolomon v1.12.1 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.14.0 + github.com/multiformats/go-multiaddr v0.15.0 github.com/nspcc-dev/neo-go v0.106.2 github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.27.0 - golang.org/x/sync v0.10.0 + golang.org/x/sync v0.12.0 google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 @@ -30,9 +30,9 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/golang/snappy v0.0.1 // indirect github.com/gorilla/websocket v1.5.1 // indirect - github.com/ipfs/go-cid v0.0.7 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect @@ -49,12 +49,12 @@ require ( github.com/twmb/murmur3 v1.1.8 // indirect go.etcd.io/bbolt v1.3.9 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 0d898c1e..1fd06ca2 100644 --- a/go.sum +++ b/go.sum @@ -62,12 +62,12 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= -github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 
v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -76,31 +76,22 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= -github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= +github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk= 
@@ -167,14 +158,13 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -182,8 +172,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -194,19 +184,18 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys 
v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -235,7 +224,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/netmap/aggregator.go b/netmap/aggregator.go index 1faba9e0..d0894f16 100644 --- a/netmap/aggregator.go +++ b/netmap/aggregator.go @@ -54,7 +54,7 @@ var ( // capacity and price. 
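// With the cached fields introduced by this patch, the weight of a node n is
// capNorm.Normalize(n.capacity) * priceNorm.Normalize(n.price); both values are
// filled once while decoding NodeInfo instead of being re-parsed from attributes here.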
func newWeightFunc(capNorm, priceNorm normalizer) weightFunc { return func(n NodeInfo) float64 { - return capNorm.Normalize(float64(n.capacity())) * priceNorm.Normalize(float64(n.Price())) + return capNorm.Normalize(float64(n.capacity)) * priceNorm.Normalize(float64(n.price)) } } diff --git a/netmap/context.go b/netmap/context.go index 27727253..2d81999b 100644 --- a/netmap/context.go +++ b/netmap/context.go @@ -97,8 +97,8 @@ func defaultWeightFunc(ns nodes) weightFunc { minV := newMinAgg() for i := range ns { - mean.Add(float64(ns[i].capacity())) - minV.Add(float64(ns[i].Price())) + mean.Add(float64(ns[i].capacity)) + minV.Add(float64(ns[i].price)) } return newWeightFunc( diff --git a/netmap/filter.go b/netmap/filter.go index 38230b76..9fe346c0 100644 --- a/netmap/filter.go +++ b/netmap/filter.go @@ -133,9 +133,9 @@ func (c *context) matchKeyValue(f *netmap.Filter, b NodeInfo) bool { switch f.GetKey() { case attrPrice: - attr = b.Price() + attr = b.price case attrCapacity: - attr = b.capacity() + attr = b.capacity default: var err error diff --git a/netmap/netmap.go b/netmap/netmap.go index 0d3b6683..a5834558 100644 --- a/netmap/netmap.go +++ b/netmap/netmap.go @@ -96,6 +96,21 @@ func (m NetMap) Epoch() uint64 { return m.epoch } +// Clone returns a copy of NetMap. +func (m *NetMap) Clone() *NetMap { + if m == nil { + return nil + } + dst := NetMap{ + epoch: m.epoch, + nodes: make([]NodeInfo, len(m.nodes)), + } + for i, node := range m.nodes { + dst.nodes[i] = *node.Clone() + } + return &dst +} + // nodes is a slice of NodeInfo instances needed for HRW sorting. type nodes []NodeInfo @@ -124,20 +139,12 @@ func (n nodes) appendWeightsTo(wf weightFunc, w []float64) []float64 { return w } -func flattenNodes(ns []nodes) nodes { - var sz, i int - - for i = range ns { - sz += len(ns[i]) - } - - result := make(nodes, 0, sz) - +// flattenNodes flattens ns nested list and appends the result to the target slice. +func flattenNodes(target nodes, ns []nodes) nodes { for i := range ns { - result = append(result, ns[i]...) + target = append(target, ns[i]...) } - - return result + return target } // PlacementVectors sorts container nodes returned by ContainerNodes method @@ -272,7 +279,7 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, pivot []byte) ([][]NodeInfo, e return nil, err } - result[i] = append(result[i], flattenNodes(nodes)...) + result[i] = flattenNodes(result[i], nodes) if unique { c.addUsedNodes(result[i]...) @@ -289,14 +296,14 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, pivot []byte) ([][]NodeInfo, e if err != nil { return nil, err } - result[i] = append(result[i], flattenNodes(nodes)...) + result[i] = flattenNodes(result[i], nodes) c.addUsedNodes(result[i]...) } else { nodes, ok := c.selections[sName] if !ok { return nil, fmt.Errorf("selector not found: REPLICA '%s'", sName) } - result[i] = append(result[i], flattenNodes(nodes)...) 
+ result[i] = flattenNodes(result[i], nodes) } } diff --git a/netmap/netmap_test.go b/netmap/netmap_test.go index 2aab5425..5be54127 100644 --- a/netmap/netmap_test.go +++ b/netmap/netmap_test.go @@ -1,6 +1,7 @@ package netmap_test import ( + "bytes" "testing" v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" @@ -45,3 +46,28 @@ func TestNetMap_SetEpoch(t *testing.T) { require.EqualValues(t, e, m.Epoch()) } + +func TestNetMap_Clone(t *testing.T) { + nm := new(netmap.NetMap) + nm.SetEpoch(1) + var ni netmap.NodeInfo + ni.SetPublicKey([]byte{1, 2, 3}) + nm.SetNodes([]netmap.NodeInfo{ni}) + + clone := nm.Clone() + + require.True(t, clone != nm) + require.True(t, &(clone.Nodes()[0]) != &(nm.Nodes()[0])) + + var clonev2 v2netmap.NetMap + clone.WriteToV2(&clonev2) + var bufClone []byte + bufClone = clonev2.StableMarshal(bufClone) + + var nmv2 v2netmap.NetMap + nm.WriteToV2(&nmv2) + var bufNM []byte + bufNM = nmv2.StableMarshal(bufNM) + + require.True(t, bytes.Equal(bufClone, bufNM)) +} diff --git a/netmap/network_info.go b/netmap/network_info.go index 11b0f14c..a270afe5 100644 --- a/netmap/network_info.go +++ b/netmap/network_info.go @@ -30,20 +30,19 @@ func (x *NetworkInfo) readFromV2(m netmap.NetworkInfo, checkFieldPresence bool) return errors.New("missing network config") } - if checkFieldPresence && c.NumberOfParameters() <= 0 { + if checkFieldPresence && len(c.Parameters()) == 0 { return errors.New("missing network parameters") } var err error - mNames := make(map[string]struct{}, c.NumberOfParameters()) + mNames := make(map[string]struct{}, len(c.Parameters())) - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { + for _, prm := range c.Parameters() { name := string(prm.GetKey()) _, was := mNames[name] if was { - err = fmt.Errorf("duplicated parameter name: %s", name) - return true + return fmt.Errorf("duplicated parameter name: %s", name) } mNames[name] = struct{}{} @@ -67,14 +66,8 @@ func (x *NetworkInfo) readFromV2(m netmap.NetworkInfo, checkFieldPresence bool) } if err != nil { - err = fmt.Errorf("invalid %s parameter: %w", name, err) + return fmt.Errorf("invalid %s parameter: %w", name, err) } - - return err != nil - }) - - if err != nil { - return err } x.m = m @@ -152,41 +145,29 @@ func (x *NetworkInfo) setConfig(name string, val []byte) { return } - found := false - prms := make([]netmap.NetworkParameter, 0, c.NumberOfParameters()) - - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { - found = bytes.Equal(prm.GetKey(), []byte(name)) - if found { - prm.SetValue(val) - } else { - prms = append(prms, *prm) + prms := c.Parameters() + for i := range prms { + if bytes.Equal(prms[i].GetKey(), []byte(name)) { + prms[i].SetValue(val) + return } - - return found - }) - - if !found { - prms = append(prms, netmap.NetworkParameter{}) - prms[len(prms)-1].SetKey([]byte(name)) - prms[len(prms)-1].SetValue(val) - - c.SetParameters(prms...) } + + prms = append(prms, netmap.NetworkParameter{}) + prms[len(prms)-1].SetKey([]byte(name)) + prms[len(prms)-1].SetValue(val) + + c.SetParameters(prms...) 
} func (x NetworkInfo) configValue(name string) (res []byte) { - x.m.GetNetworkConfig().IterateParameters(func(prm *netmap.NetworkParameter) bool { + for _, prm := range x.m.GetNetworkConfig().Parameters() { if string(prm.GetKey()) == name { - res = prm.GetValue() - - return true + return prm.GetValue() } + } - return false - }) - - return + return nil } // SetRawNetworkParameter sets named FrostFS network parameter whose value is @@ -218,7 +199,7 @@ func (x *NetworkInfo) RawNetworkParameter(name string) []byte { func (x *NetworkInfo) IterateRawNetworkParameters(f func(name string, value []byte)) { c := x.m.GetNetworkConfig() - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { + for _, prm := range c.Parameters() { name := string(prm.GetKey()) switch name { default: @@ -237,9 +218,7 @@ func (x *NetworkInfo) IterateRawNetworkParameters(f func(name string, value []by configHomomorphicHashingDisabled, configMaintenanceModeAllowed: } - - return false - }) + } } func (x *NetworkInfo) setConfigUint64(name string, num uint64) { diff --git a/netmap/network_info_test.go b/netmap/network_info_test.go index 7e6dc12d..64abe5d3 100644 --- a/netmap/network_info_test.go +++ b/netmap/network_info_test.go @@ -76,16 +76,10 @@ func testConfigValue(t *testing.T, var m netmap.NetworkInfo x.WriteToV2(&m) - require.EqualValues(t, 1, m.GetNetworkConfig().NumberOfParameters()) - found := false - m.GetNetworkConfig().IterateParameters(func(prm *netmap.NetworkParameter) bool { - require.False(t, found) - require.Equal(t, []byte(v2Key), prm.GetKey()) - require.Equal(t, v2Val(exp), prm.GetValue()) - found = true - return false - }) - require.True(t, found) + var p netmap.NetworkParameter + p.SetKey([]byte(v2Key)) + p.SetValue(v2Val(exp)) + require.Equal(t, []netmap.NetworkParameter{p}, m.GetNetworkConfig().Parameters()) } setter(&x, val1) diff --git a/netmap/node_info.go b/netmap/node_info.go index 0d250ce5..ba65daad 100644 --- a/netmap/node_info.go +++ b/netmap/node_info.go @@ -3,6 +3,7 @@ package netmap import ( "errors" "fmt" + "iter" "slices" "strconv" "strings" @@ -25,6 +26,9 @@ import ( type NodeInfo struct { m netmap.NodeInfo hash uint64 + + capacity uint64 + price uint64 } // reads NodeInfo from netmap.NodeInfo message. If checkFieldPresence is set, @@ -32,6 +36,7 @@ type NodeInfo struct { // presented field according to FrostFS API V2 protocol. 
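With this change, price and capacity are parsed once during reading and cached as plain integers on NodeInfo, so hot paths such as placement weighting no longer re-parse attribute strings. A minimal sketch of the externally visible behaviour (the names in main are illustrative):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func main() {
	var ni netmap.NodeInfo
	ni.SetPrice(7)      // stored as the Price attribute and cached as uint64
	ni.SetCapacity(100) // same for Capacity

	fmt.Println(ni.Price()) // 7, read from the cached field without strconv
}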
func (x *NodeInfo) readFromV2(m netmap.NodeInfo, checkFieldPresence bool) error { var err error + var capacity, price uint64 binPublicKey := m.GetPublicKey() if checkFieldPresence && len(binPublicKey) == 0 { @@ -49,18 +54,18 @@ func (x *NodeInfo) readFromV2(m netmap.NodeInfo, checkFieldPresence bool) error if key == "" { return fmt.Errorf("empty key of the attribute #%d", i) } else if _, ok := mAttr[key]; ok { - return fmt.Errorf("duplicated attbiuted %s", key) + return fmt.Errorf("duplicate attributes %s", key) } + mAttr[key] = struct{}{} switch { case key == attrCapacity: - _, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) + capacity, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) if err != nil { return fmt.Errorf("invalid %s attribute: %w", attrCapacity, err) } case key == attrPrice: - var err error - _, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) + price, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) if err != nil { return fmt.Errorf("invalid %s attribute: %w", attrPrice, err) } @@ -73,6 +78,8 @@ func (x *NodeInfo) readFromV2(m netmap.NodeInfo, checkFieldPresence bool) error x.m = m x.hash = hrw.Hash(binPublicKey) + x.capacity = capacity + x.price = price return nil } @@ -200,12 +207,28 @@ func (x NodeInfo) NumberOfNetworkEndpoints() int { // FrostFS system requirements. // // See also SetNetworkEndpoints. +// +// Deprecated: use [NodeInfo.NetworkEndpoints] instead. func (x NodeInfo) IterateNetworkEndpoints(f func(string) bool) { - x.m.IterateAddresses(f) + for s := range x.NetworkEndpoints() { + if f(s) { + return + } + } +} + +// NetworkEndpoints returns an iterator over network endpoints announced by the +// node. +// +// See also SetNetworkEndpoints. +func (x NodeInfo) NetworkEndpoints() iter.Seq[string] { + return x.m.Addresses() } // IterateNetworkEndpoints is an extra-sugared function over IterateNetworkEndpoints // method which allows to unconditionally iterate over all node's network endpoints. +// +// Deprecated: use [NodeInfo.NetworkEndpoints] instead. func IterateNetworkEndpoints(node NodeInfo, f func(string)) { node.IterateNetworkEndpoints(func(addr string) bool { f(addr) @@ -235,46 +258,21 @@ func (x *NodeInfo) setNumericAttribute(key string, num uint64) { // price is announced. func (x *NodeInfo) SetPrice(price uint64) { x.setNumericAttribute(attrPrice, price) + x.price = price } // Price returns price set using SetPrice. // // Zero NodeInfo has zero price. func (x NodeInfo) Price() uint64 { - val := x.Attribute(attrPrice) - if val == "" { - return 0 - } - - price, err := strconv.ParseUint(val, 10, 64) - if err != nil { - panic(fmt.Sprintf("unexpected price parsing error %s: %v", val, err)) - } - - return price + return x.price } // SetCapacity sets the storage capacity declared by the node. By default, zero // capacity is announced. func (x *NodeInfo) SetCapacity(capacity uint64) { x.setNumericAttribute(attrCapacity, capacity) -} - -// capacity returns capacity set using SetCapacity. -// -// Zero NodeInfo has zero capacity. -func (x NodeInfo) capacity() uint64 { - val := x.Attribute(attrCapacity) - if val == "" { - return 0 - } - - capacity, err := strconv.ParseUint(val, 10, 64) - if err != nil { - panic(fmt.Sprintf("unexpected capacity parsing error %s: %v", val, err)) - } - - return capacity + x.capacity = capacity } const attrUNLOCODE = "UN-LOCODE" @@ -393,8 +391,22 @@ func (x NodeInfo) NumberOfAttributes() int { return len(x.m.GetAttributes()) } +// Attributes returns an iterator over node attributes. 
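Both new accessors return Go 1.23 range-over-func iterators, so callers range over endpoints and attributes directly instead of passing callbacks to the deprecated Iterate* helpers. A usage sketch (printNode and the sample values are illustrative):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// printNode dumps a node's endpoints and attributes via the new iterators.
func printNode(ni netmap.NodeInfo) {
	for endpoint := range ni.NetworkEndpoints() {
		fmt.Println("endpoint:", endpoint)
	}
	for key, value := range ni.Attributes() {
		fmt.Printf("attribute %s=%s\n", key, value)
	}
}

func main() {
	var ni netmap.NodeInfo
	ni.SetNetworkEndpoints("/dns4/node1/tcp/8080")
	ni.SetAttribute("UN-LOCODE", "RU MOW")
	printNode(ni)
}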
+func (x NodeInfo) Attributes() iter.Seq2[string, string] { + return func(yield func(string, string) bool) { + a := x.m.GetAttributes() + for i := range a { + if !yield(a[i].GetKey(), a[i].GetValue()) { + break + } + } + } +} + // IterateAttributes iterates over all node attributes and passes the into f. // Handler MUST NOT be nil. +// +// Deprecated: use [NodeInfo.Attributes] instead. func (x NodeInfo) IterateAttributes(f func(key, value string)) { a := x.m.GetAttributes() for i := range a { @@ -411,6 +423,17 @@ func (x *NodeInfo) SetAttribute(key, value string) { panic("empty value in SetAttribute") } + // NodeInfo with non-numeric `Price`` or `Capacity` attributes + // is considered invalid by NodeInfo.readFromV2(). + // Here we have no way to signal an error, and panic seems an overkill. + // So, set cached fields only if we can parse the value and 0 parsing fails. + switch key { + case attrPrice: + x.price, _ = strconv.ParseUint(value, 10, 64) + case attrCapacity: + x.capacity, _ = strconv.ParseUint(value, 10, 64) + } + a := x.m.GetAttributes() for i := range a { if a[i].GetKey() == key { @@ -563,6 +586,17 @@ func (x *NodeInfo) SetStatus(state NodeState) { x.m.SetState(netmap.NodeState(state)) } +// Clone returns a copy of NodeInfo. +func (x *NodeInfo) Clone() *NodeInfo { + if x == nil { + return nil + } + return &NodeInfo{ + hash: x.hash, + m: *x.m.Clone(), + } +} + // String implements fmt.Stringer. // // String is designed to be human-readable, and its format MAY differ between diff --git a/netmap/node_info_test.go b/netmap/node_info_test.go index 54c78b2c..65ea0e80 100644 --- a/netmap/node_info_test.go +++ b/netmap/node_info_test.go @@ -1,12 +1,94 @@ package netmap import ( + "fmt" "testing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/stretchr/testify/require" ) +func TestNodeInfo_NetworkEndpoints(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var n NodeInfo + for range n.NetworkEndpoints() { + t.Fatalf("handler is called, but it shouldn't") + } + }) + + var n NodeInfo + n.SetNetworkEndpoints("1", "2", "3") + + t.Run("break", func(t *testing.T) { + var res []string + for s := range n.NetworkEndpoints() { + if s == "2" { + break + } + res = append(res, s) + } + require.Equal(t, []string{"1"}, res) + }) + t.Run("continue", func(t *testing.T) { + var res []string + for s := range n.NetworkEndpoints() { + if s == "2" { + continue + } + res = append(res, s) + } + require.Equal(t, []string{"1", "3"}, res) + }) + + var res []string + for s := range n.NetworkEndpoints() { + res = append(res, s) + } + require.Equal(t, []string{"1", "2", "3"}, res) +} + +func TestNodeInfo_Attributes(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var n NodeInfo + for range n.Attributes() { + t.Fatalf("handler is called, but it shouldn't") + } + }) + + var n NodeInfo + n.SetAttribute("key1", "value1") + n.SetAttribute("key2", "value2") + n.SetAttribute("key3", "value3") + + t.Run("break", func(t *testing.T) { + var res [][2]string + for k, v := range n.Attributes() { + if k == "key2" { + break + } + res = append(res, [2]string{k, v}) + } + require.Equal(t, [][2]string{{"key1", "value1"}}, res) + }) + t.Run("continue", func(t *testing.T) { + var res [][2]string + for k, v := range n.Attributes() { + if k == "key2" { + continue + } + res = append(res, [2]string{k, v}) + } + require.Equal(t, [][2]string{{"key1", "value1"}, {"key3", "value3"}}, res) + }) + + var res [][2]string + for k, v := range n.Attributes() { + res = 
append(res, [2]string{k, v}) + } + require.Equal(t, [][2]string{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}}, res) +} + func TestNodeInfo_SetAttribute(t *testing.T) { var n NodeInfo @@ -108,3 +190,94 @@ func TestNodeInfo_ExternalAddr(t *testing.T) { n.SetExternalAddresses(addr[1:]...) require.Equal(t, addr[1:], n.ExternalAddresses()) } + +func TestNodeInfo_Clone(t *testing.T) { + var ni NodeInfo + ni.SetPublicKey([]byte{2, 3}) + + c := ni.Clone() + require.True(t, c != &ni) + require.True(t, &(c.PublicKey()[0]) != &(ni.PublicKey()[0])) +} + +func TestNodeInfo_Unmarshal(t *testing.T) { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + + attrs := make([]netmap.Attribute, 2) + for i := range attrs { + attrs[i].SetKey(fmt.Sprintf("key%d", i)) + attrs[i].SetValue(fmt.Sprintf("value%d", i)) + } + goodNodeInfo := func() netmap.NodeInfo { + var nodev2 netmap.NodeInfo + nodev2.SetPublicKey(pk.PublicKey().Bytes()) + nodev2.SetAddresses("127.0.0.1:2025") + nodev2.SetState(netmap.Online) + nodev2.SetAttributes(attrs) + return nodev2 + } + + // Check that goodNodeInfo indeed returns good node. + // Otherwise, the whole test is garbage. + require.NoError(t, new(NodeInfo).ReadFromV2(goodNodeInfo())) + + t.Run("empty public key", func(t *testing.T) { + n := goodNodeInfo() + n.SetPublicKey(nil) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), "missing public key") + }) + t.Run("missing addresses", func(t *testing.T) { + n := goodNodeInfo() + n.SetAddresses() + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), "missing network endpoints") + }) + t.Run("empty attribute key", func(t *testing.T) { + n := goodNodeInfo() + + var a netmap.Attribute + a.SetValue("non-empty") + n.SetAttributes(append(attrs, a)) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), + fmt.Sprintf("empty key of the attribute #%d", len(attrs))) + }) + t.Run("empty attribute value", func(t *testing.T) { + n := goodNodeInfo() + + var a netmap.Attribute + a.SetKey("non-empty-key") + n.SetAttributes(append(attrs, a)) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), + "empty value of the attribute non-empty-key") + }) + t.Run("invalid price attribute", func(t *testing.T) { + n := goodNodeInfo() + + var a netmap.Attribute + a.SetKey(attrPrice) + a.SetValue("not a number") + n.SetAttributes(append(attrs, a)) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), + fmt.Sprintf("invalid %s attribute", attrPrice)) + }) + t.Run("invalid capacity attribute", func(t *testing.T) { + n := goodNodeInfo() + + var a netmap.Attribute + a.SetKey(attrCapacity) + a.SetValue("not a number") + n.SetAttributes(append(attrs, a)) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), + fmt.Sprintf("invalid %s attribute", attrCapacity)) + }) + t.Run("duplicate attributes", func(t *testing.T) { + n := goodNodeInfo() + + var a netmap.Attribute + a.SetKey("key1") + a.SetValue("value3") + n.SetAttributes(append(attrs, a)) + require.ErrorContains(t, new(NodeInfo).ReadFromV2(n), + "duplicate attributes key1") + }) +} diff --git a/netmap/selector_test.go b/netmap/selector_test.go index ae996dd6..a4ca7748 100644 --- a/netmap/selector_test.go +++ b/netmap/selector_test.go @@ -191,7 +191,7 @@ func TestPlacementPolicy_DeterministicOrder(t *testing.T) { nss[i] = v[i] } - ns := flattenNodes(nss) + ns := flattenNodes(nil, nss) require.Equal(t, 2, len(ns)) return ns[0].Hash(), ns[1].Hash() } diff --git a/object/id/id.go b/object/id/id.go index d1dfe80b..29e6a3ae 100644 --- a/object/id/id.go +++ b/object/id/id.go @@ -4,6 +4,7 @@ 
import ( "crypto/ecdsa" "crypto/sha256" "fmt" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -167,3 +168,9 @@ func (id *ID) UnmarshalJSON(data []byte) error { return nil } + +// Cmp returns an integer comparing two base58 encoded object ID lexicographically. +// The result will be 0 if id1 == id2, -1 if id1 < id2, and +1 if id1 > id2. +func (id ID) Cmp(id2 ID) int { + return strings.Compare(id.EncodeToString(), id2.EncodeToString()) +} diff --git a/object/id/id_test.go b/object/id/id_test.go index e1a2e267..9df6186d 100644 --- a/object/id/id_test.go +++ b/object/id/id_test.go @@ -3,7 +3,9 @@ package oid import ( "crypto/rand" "crypto/sha256" + "slices" "strconv" + "strings" "testing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" @@ -180,3 +182,16 @@ func TestID_Encode(t *testing.T) { require.Equal(t, emptyID, id.EncodeToString()) }) } + +func TestID_Cmp(t *testing.T) { + id1 := randID(t) + id2 := randID(t) + id3 := randID(t) + + arr := []ID{id1, id2, id3} + + slices.SortFunc(arr, ID.Cmp) + for i := 1; i < len(arr); i++ { + require.NotEqual(t, strings.Compare(arr[i-1].EncodeToString(), arr[i].EncodeToString()), 1, "array is not sorted correctly") + } +} diff --git a/object/object.go b/object/object.go index b98685d7..ed551b5c 100644 --- a/object/object.go +++ b/object/object.go @@ -345,6 +345,32 @@ func (o *Object) SetAttributes(v ...Attribute) { }) } +// SplitHeader returns split header of the object. If it's set, then split header +// defines how the object relates to other objects in a split operation. +func (o *Object) SplitHeader() (splitHeader *SplitHeader) { + if v2 := (*object.Object)(o). + GetHeader(). + GetSplit(); v2 != nil { + splitHeader = NewSplitHeaderFromV2(v2) + } + + return +} + +// SetSplitHeader sets split header. +func (o *Object) SetSplitHeader(v *SplitHeader) { + o.setSplitFields(func(sh *object.SplitHeader) { + v2 := v.ToV2() + + sh.SetParent(v2.GetParent()) + sh.SetPrevious(v2.GetPrevious()) + sh.SetParentHeader(v2.GetParentHeader()) + sh.SetParentSignature(v2.GetParentSignature()) + sh.SetChildren(v2.GetChildren()) + sh.SetSplitID(v2.GetSplitID()) + }) +} + // PreviousID returns identifier of the previous sibling object. func (o *Object) PreviousID() (v oid.ID, isSet bool) { v2 := (*object.Object)(o) diff --git a/object/patch.go b/object/patch.go index 2a066743..9c6ddc4c 100644 --- a/object/patch.go +++ b/object/patch.go @@ -18,6 +18,10 @@ type Patch struct { // filled with NewAttributes. Otherwise, the attributes are just merged. ReplaceAttributes bool + // A new split header which is set to object's header. If `nil`, then split header patching + // is ignored. + NewSplitHeader *SplitHeader + // Payload patch. If this field is not set, then it assumed such Patch patches only // header (see NewAttributes, ReplaceAttributes). 
PayloadPatch *PayloadPatch @@ -41,6 +45,8 @@ func (p *Patch) ToV2() *v2object.PatchRequestBody { v2.SetNewAttributes(attrs) v2.SetReplaceAttributes(p.ReplaceAttributes) + v2.SetNewSplitHeader(p.NewSplitHeader.ToV2()) + v2.SetPatch(p.PayloadPatch.ToV2()) return v2 @@ -63,6 +69,8 @@ func (p *Patch) FromV2(patch *v2object.PatchRequestBody) { p.ReplaceAttributes = patch.GetReplaceAttributes() + p.NewSplitHeader = NewSplitHeaderFromV2(patch.GetNewSplitHeader()) + if v2patch := patch.GetPatch(); v2patch != nil { p.PayloadPatch = new(PayloadPatch) p.PayloadPatch.FromV2(v2patch) diff --git a/object/patcher/patcher.go b/object/patcher/patcher.go index aad1f2a3..66df9d33 100644 --- a/object/patcher/patcher.go +++ b/object/patcher/patcher.go @@ -11,10 +11,12 @@ import ( ) var ( - ErrOffsetExceedsSize = errors.New("patch offset exceeds object size") - ErrInvalidPatchOffsetOrder = errors.New("invalid patch offset order") - ErrPayloadPatchIsNil = errors.New("nil payload patch") - ErrAttrPatchAlreadyApplied = errors.New("attribute patch already applied") + ErrOffsetExceedsSize = errors.New("patch offset exceeds object size") + ErrInvalidPatchOffsetOrder = errors.New("invalid patch offset order") + ErrPayloadPatchIsNil = errors.New("nil payload patch") + ErrAttrPatchAlreadyApplied = errors.New("attribute patch already applied") + ErrHeaderPatchAlreadyApplied = errors.New("header patch already applied") + ErrSplitHeaderPatchAppliedWithPayloadPatch = errors.New("split header patch applied with payload patch") ) // PatchRes is the result of patch application. @@ -27,13 +29,24 @@ type PatchApplier interface { // ApplyAttributesPatch applies the patch only for the object's attributes. // // ApplyAttributesPatch can't be invoked few times, otherwise it returns `ErrAttrPatchAlreadyApplied` error. + // `ApplyHeaderPatch` and `ApplyAttributesPatch` are mutually exclusive - only one method can be used. // // The call is idempotent for the original header if it's invoked with empty `newAttrs` and // `replaceAttrs = false`. ApplyAttributesPatch(ctx context.Context, newAttrs []objectSDK.Attribute, replaceAttrs bool) error + // ApplyHeaderPatch applies the patch only for the object's attributes. + // + // ApplyHeaderPatch can't be invoked few times, otherwise it returns `ErrHeaderPatchAlreadyApplied` error. + // `ApplyHeaderPatch` and `ApplyAttributesPatch` are mutually exclusive - only one method can be used. + // + // The call is idempotent for the original header if it's invoked with `ApplyHeaderPatchPrm` with not set fields. + ApplyHeaderPatch(ctx context.Context, prm ApplyHeaderPatchPrm) error + // ApplyPayloadPatch applies the patch for the object's payload. // + // ApplyPayloadPatch returns `ErrSplitHeaderPatchAppliedWithPayloadPatch` when attempting to apply it with a split header patch. + // // ApplyPayloadPatch returns `ErrPayloadPatchIsNil` error if patch is nil. ApplyPayloadPatch(ctx context.Context, payloadPatch *objectSDK.PayloadPatch) error @@ -41,6 +54,14 @@ type PatchApplier interface { Close(context.Context) (PatchRes, error) } +type ApplyHeaderPatchPrm struct { + NewSplitHeader *objectSDK.SplitHeader + + NewAttributes []objectSDK.Attribute + + ReplaceAttributes bool +} + // RangeProvider is the interface that provides a method to get original object payload // by a given range. 
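A rough usage sketch of the new header patching flow, assuming p is a PatchApplier already built via New with an object writer and range provider (the helper patchHeader is illustrative). Split header and attribute changes go through a single ApplyHeaderPatch call; a later ApplyPayloadPatch is rejected with ErrSplitHeaderPatchAppliedWithPayloadPatch once a split header has been patched:

package example

import (
	"context"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
)

// patchHeader applies split header and attribute changes in one header patch
// and finalizes the result.
func patchHeader(ctx context.Context, p patcher.PatchApplier, split *objectSDK.SplitHeader, attrs []objectSDK.Attribute) (patcher.PatchRes, error) {
	err := p.ApplyHeaderPatch(ctx, patcher.ApplyHeaderPatchPrm{
		NewSplitHeader:    split,
		NewAttributes:     attrs,
		ReplaceAttributes: false, // merge with the existing attributes
	})
	if err != nil {
		return patcher.PatchRes{}, err
	}

	return p.Close(ctx)
}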
type RangeProvider interface { @@ -61,7 +82,9 @@ type patcher struct { hdr *objectSDK.Object - attrPatchAlreadyApplied bool + hdrPatchAlreadyApplied bool + + splitHeaderPatchAlreadyApplied bool readerBuffSize int } @@ -107,10 +130,10 @@ func New(prm Params) PatchApplier { func (p *patcher) ApplyAttributesPatch(ctx context.Context, newAttrs []objectSDK.Attribute, replaceAttrs bool) error { defer func() { - p.attrPatchAlreadyApplied = true + p.hdrPatchAlreadyApplied = true }() - if p.attrPatchAlreadyApplied { + if p.hdrPatchAlreadyApplied { return ErrAttrPatchAlreadyApplied } @@ -127,7 +150,38 @@ func (p *patcher) ApplyAttributesPatch(ctx context.Context, newAttrs []objectSDK return nil } +func (p *patcher) ApplyHeaderPatch(ctx context.Context, prm ApplyHeaderPatchPrm) error { + defer func() { + p.hdrPatchAlreadyApplied = true + }() + + if p.hdrPatchAlreadyApplied { + return ErrHeaderPatchAlreadyApplied + } + + if prm.NewSplitHeader != nil { + p.hdr.SetSplitHeader(prm.NewSplitHeader) + + p.splitHeaderPatchAlreadyApplied = true + } + + if prm.ReplaceAttributes { + p.hdr.SetAttributes(prm.NewAttributes...) + } else if len(prm.NewAttributes) > 0 { + mergedAttrs := mergeAttributes(prm.NewAttributes, p.hdr.Attributes()) + p.hdr.SetAttributes(mergedAttrs...) + } + + if err := p.objectWriter.WriteHeader(ctx, p.hdr); err != nil { + return fmt.Errorf("writer header: %w", err) + } + return nil +} + func (p *patcher) ApplyPayloadPatch(ctx context.Context, payloadPatch *objectSDK.PayloadPatch) error { + if p.splitHeaderPatchAlreadyApplied { + return ErrSplitHeaderPatchAppliedWithPayloadPatch + } if payloadPatch == nil { return ErrPayloadPatchIsNil } diff --git a/object/patcher/patcher_test.go b/object/patcher/patcher_test.go index 3abb9390..4819b591 100644 --- a/object/patcher/patcher_test.go +++ b/object/patcher/patcher_test.go @@ -106,7 +106,11 @@ func TestPatchRevert(t *testing.T) { patcher := New(prm) - err := patcher.ApplyAttributesPatch(context.Background(), modifPatch.NewAttributes, modifPatch.ReplaceAttributes) + err := patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + ReplaceAttributes: modifPatch.ReplaceAttributes, + }) require.NoError(t, err) err = patcher.ApplyPayloadPatch(context.Background(), modifPatch.PayloadPatch) @@ -145,7 +149,11 @@ func TestPatchRevert(t *testing.T) { patcher = New(prm) - err = patcher.ApplyAttributesPatch(context.Background(), revertPatch.NewAttributes, revertPatch.ReplaceAttributes) + err = patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: revertPatch.NewSplitHeader, + NewAttributes: revertPatch.NewAttributes, + ReplaceAttributes: revertPatch.ReplaceAttributes, + }) require.NoError(t, err) err = patcher.ApplyPayloadPatch(context.Background(), revertPatch.PayloadPatch) @@ -157,7 +165,7 @@ func TestPatchRevert(t *testing.T) { require.Equal(t, originalObjectPayload, patchedPatchedObj.Payload()) } -func TestPatchRepeatAttributePatch(t *testing.T) { +func TestPatchRepeatHeaderPatch(t *testing.T) { obj, _ := newTestObject() modifPatch := &objectSDK.Patch{} @@ -187,11 +195,142 @@ func TestPatchRepeatAttributePatch(t *testing.T) { patcher := New(prm) - err := patcher.ApplyAttributesPatch(context.Background(), modifPatch.NewAttributes, modifPatch.ReplaceAttributes) + err := patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + 
ReplaceAttributes: modifPatch.ReplaceAttributes, + }) require.NoError(t, err) - err = patcher.ApplyAttributesPatch(context.Background(), modifPatch.NewAttributes, modifPatch.ReplaceAttributes) - require.ErrorIs(t, err, ErrAttrPatchAlreadyApplied) + err = patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + ReplaceAttributes: modifPatch.ReplaceAttributes, + }) + require.ErrorIs(t, err, ErrHeaderPatchAlreadyApplied) +} + +func TestPatchSplitHeader(t *testing.T) { + obj, _ := newTestObject() + + const ( + splitIDStr = "a59c9f87-14bc-4a61-95d1-7eb10f036163" + parentStr = "9cRjAaPqUt5zaDAjBkSCqFfPdkE8dHJ7mtRupRjPWp6E" + previosStr = "6WaTd9HobT4Z52NnKWHAtjqtQu2Ww5xZwNdT4ptshkKE" + ) + + splitID := objectSDK.NewSplitID() + require.NoError(t, splitID.Parse(splitIDStr)) + + var par, prev oid.ID + require.NoError(t, par.DecodeString(parentStr)) + require.NoError(t, prev.DecodeString(previosStr)) + + splitHdr := objectSDK.NewSplitHeader() + splitHdr.SetSplitID(splitID) + splitHdr.SetParentID(par) + splitHdr.SetPreviousID(prev) + + originalObjectPayload := []byte("*******************") + + obj.SetPayload(originalObjectPayload) + obj.SetPayloadSize(uint64(len(originalObjectPayload))) + + rangeProvider := &mockRangeProvider{ + originalObjectPayload: originalObjectPayload, + } + + t.Run("no payload patch", func(t *testing.T) { + patchedObj, _ := newTestObject() + + wr := &mockPatchedObjectWriter{ + obj: patchedObj, + } + + modifPatch := &objectSDK.Patch{ + NewSplitHeader: splitHdr, + } + + prm := Params{ + Header: obj.CutPayload(), + + RangeProvider: rangeProvider, + + ObjectWriter: wr, + } + + patcher := New(prm) + + err := patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + ReplaceAttributes: modifPatch.ReplaceAttributes, + }) + require.NoError(t, err) + + splitHdrFromPatchedObj := patchedObj.SplitHeader() + require.NotNil(t, splitHdrFromPatchedObj) + + patchObjParID, isSet := splitHdrFromPatchedObj.ParentID() + require.True(t, isSet) + require.True(t, patchObjParID.Equals(par)) + + patchObjPrevID, isSet := splitHdrFromPatchedObj.PreviousID() + require.True(t, isSet) + require.True(t, patchObjPrevID.Equals(prev)) + + require.Equal(t, splitHdrFromPatchedObj.SplitID().String(), splitID.String()) + }) + + t.Run("with payload patch", func(t *testing.T) { + patchedObj, _ := newTestObject() + + wr := &mockPatchedObjectWriter{ + obj: patchedObj, + } + + modifPatch := &objectSDK.Patch{ + NewSplitHeader: splitHdr, + PayloadPatch: &objectSDK.PayloadPatch{ + Range: rangeWithOffestWithLength(10, 0), + Chunk: []byte(""), + }, + } + + prm := Params{ + Header: obj.CutPayload(), + + RangeProvider: rangeProvider, + + ObjectWriter: wr, + } + + patcher := New(prm) + + err := patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + ReplaceAttributes: modifPatch.ReplaceAttributes, + }) + require.NoError(t, err) + + splitHdrFromPatchedObj := patchedObj.SplitHeader() + require.NotNil(t, splitHdrFromPatchedObj) + + patchObjParID, isSet := splitHdrFromPatchedObj.ParentID() + require.True(t, isSet) + require.True(t, patchObjParID.Equals(par)) + + patchObjPrevID, isSet := splitHdrFromPatchedObj.PreviousID() + require.True(t, isSet) + require.True(t, patchObjPrevID.Equals(prev)) + + require.Equal(t, 
splitHdrFromPatchedObj.SplitID().String(), splitID.String()) + + err = patcher.ApplyPayloadPatch(context.Background(), modifPatch.PayloadPatch) + require.Error(t, err, ErrSplitHeaderPatchAppliedWithPayloadPatch) + }) + } func TestPatchEmptyPayloadPatch(t *testing.T) { @@ -224,7 +363,11 @@ func TestPatchEmptyPayloadPatch(t *testing.T) { patcher := New(prm) - err := patcher.ApplyAttributesPatch(context.Background(), modifPatch.NewAttributes, modifPatch.ReplaceAttributes) + err := patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: modifPatch.NewSplitHeader, + NewAttributes: modifPatch.NewAttributes, + ReplaceAttributes: modifPatch.ReplaceAttributes, + }) require.NoError(t, err) err = patcher.ApplyPayloadPatch(context.Background(), nil) @@ -599,7 +742,11 @@ func TestPatch(t *testing.T) { for i, patch := range test.patches { if i == 0 { - _ = patcher.ApplyAttributesPatch(context.Background(), patch.NewAttributes, patch.ReplaceAttributes) + _ = patcher.ApplyHeaderPatch(context.Background(), ApplyHeaderPatchPrm{ + NewSplitHeader: patch.NewSplitHeader, + NewAttributes: patch.NewAttributes, + ReplaceAttributes: patch.ReplaceAttributes, + }) } if patch.PayloadPatch == nil { diff --git a/object/split_header.go b/object/split_header.go new file mode 100644 index 00000000..5cbc2149 --- /dev/null +++ b/object/split_header.go @@ -0,0 +1,124 @@ +package object + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +// SplitHeader is an object's header component that defines the relationship +// between this object and other objects if the object is part of a split operation. +type SplitHeader object.SplitHeader + +// NewSplitHeaderFromV2 wraps v2 SplitHeader message to SplitHeader. +func NewSplitHeaderFromV2(v2 *object.SplitHeader) *SplitHeader { + return (*SplitHeader)(v2) +} + +// NewSplitHeader creates blank SplitHeader instance. 
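A short sketch of composing the new type and wiring it into a Patch; the object IDs are assumed to come from elsewhere and splitHeaderPatch is an illustrative helper:

package example

import (
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// splitHeaderPatch builds a Patch that only rewires the split relations of an object.
func splitHeaderPatch(parent, previous oid.ID) *objectSDK.Patch {
	sh := objectSDK.NewSplitHeader()
	sh.SetSplitID(objectSDK.NewSplitID()) // fresh split ID; reuse an existing one to stay in the same chain
	sh.SetParentID(parent)
	sh.SetPreviousID(previous)

	return &objectSDK.Patch{NewSplitHeader: sh}
}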
+func NewSplitHeader() *SplitHeader { + return NewSplitHeaderFromV2(new(object.SplitHeader)) +} + +func (sh *SplitHeader) ToV2() *object.SplitHeader { + return (*object.SplitHeader)(sh) +} + +func (sh *SplitHeader) ParentID() (v oid.ID, isSet bool) { + v2 := (*object.SplitHeader)(sh) + if id := v2.GetParent(); id != nil { + _ = v.ReadFromV2(*id) + isSet = true + } + + return +} + +func (sh *SplitHeader) SetParentID(v oid.ID) { + v2 := new(refs.ObjectID) + v.WriteToV2(v2) + (*object.SplitHeader)(sh).SetParent(v2) +} + +func (sh *SplitHeader) PreviousID() (v oid.ID, isSet bool) { + v2 := (*object.SplitHeader)(sh) + if id := v2.GetPrevious(); id != nil { + _ = v.ReadFromV2(*id) + isSet = true + } + + return +} + +func (sh *SplitHeader) SetPreviousID(v oid.ID) { + v2 := new(refs.ObjectID) + v.WriteToV2(v2) + (*object.SplitHeader)(sh).SetPrevious(v2) +} + +func (sh *SplitHeader) ParentSignature() *frostfscrypto.Signature { + v2 := (*object.SplitHeader)(sh) + if parSigV2 := v2.GetParentSignature(); parSigV2 != nil { + parSig := new(frostfscrypto.Signature) + _ = parSig.ReadFromV2(*parSigV2) + + return parSig + } + + return nil +} + +func (sh *SplitHeader) SetParentSignature(v *frostfscrypto.Signature) { + var parSigV2 *refs.Signature + + if v != nil { + parSigV2 = new(refs.Signature) + v.WriteToV2(parSigV2) + } + + (*object.SplitHeader)(sh).SetParentSignature(parSigV2) +} + +func (sh *SplitHeader) ParentHeader() (parentHeader *Object) { + v2 := (*object.SplitHeader)(sh) + + if parHdr := v2.GetParentHeader(); parHdr != nil { + parentHeader = New() + parentHeader.setHeaderField(func(h *object.Header) { + *h = *v2.GetParentHeader() + }) + } + + return +} + +func (sh *SplitHeader) SetParentHeader(parentHeader *Object) { + (*object.SplitHeader)(sh).SetParentHeader(parentHeader.ToV2().GetHeader()) +} + +func (sh *SplitHeader) Children() (res []oid.ID) { + v2 := (*object.SplitHeader)(sh) + if children := v2.GetChildren(); len(children) > 0 { + res = make([]oid.ID, len(children)) + for i := range children { + _ = res[i].ReadFromV2(children[i]) + } + } + + return +} + +func (sh *SplitHeader) SetChildren(children []oid.ID) { + v2Children := make([]refs.ObjectID, len(children)) + for i := range children { + children[i].WriteToV2(&v2Children[i]) + } + (*object.SplitHeader)(sh).SetChildren(v2Children) +} + +func (sh *SplitHeader) SplitID() *SplitID { + return NewSplitIDFromV2((*object.SplitHeader)(sh).GetSplitID()) +} + +func (sh *SplitHeader) SetSplitID(v *SplitID) { + (*object.SplitHeader)(sh).SetSplitID(v.ToV2()) +} diff --git a/object/transformer/transformer.go b/object/transformer/transformer.go index f7e5cd30..b4695ee3 100644 --- a/object/transformer/transformer.go +++ b/object/transformer/transformer.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "fmt" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" buffPool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -115,9 +116,14 @@ func fromObject(obj *object.Object) *object.Object { res.SetAttributes(obj.Attributes()...) res.SetType(obj.Type()) - // obj.SetSplitID creates splitHeader but we don't need to do it in case - // of small objects, so we should make nil check. - if obj.SplitID() != nil { + // There are two ways to specify split information: + // 1. Using explicit SplitHeader. Thus, we only propagate whole split information + // if it's already set in the source object (use-case: Patch method). + // 2.
Using SplitID - will automatically generate a SplitHeader, but this is not requiered for + // small objects. + if obj.SplitHeader() != nil { + res.SetSplitHeader(obj.SplitHeader()) + } else if obj.SplitID() != nil { res.SetSplitID(obj.SplitID()) } @@ -327,4 +333,11 @@ func (s *payloadSizeLimiter) prepareFirstChild() { s.current.SetAttributes() // attributes will be added to parent in detachParent + + // add expiration epoch to each part + for _, attr := range s.parAttrs { + if attr.Key() == objectV2.SysAttributeExpEpoch { + s.current.SetAttributes(attr) + } + } } diff --git a/pkg/network/address.go b/pkg/network/address.go index 45d59397..dff8614f 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -66,7 +66,7 @@ func (a *Address) FromString(s string) error { if err == nil { a.ma, err = multiaddr.NewMultiaddr(s) if err == nil && hasTLS { - a.ma = a.ma.Encapsulate(tls) + a.ma = a.ma.AppendComponent(tls) } } } diff --git a/pkg/network/tls.go b/pkg/network/tls.go index b3e96637..f30c99e1 100644 --- a/pkg/network/tls.go +++ b/pkg/network/tls.go @@ -9,7 +9,7 @@ const ( ) // tls var is used for (un)wrapping other multiaddrs around TLS multiaddr. -var tls, _ = multiaddr.NewMultiaddr("/" + tlsProtocolName) +var tls, _ = multiaddr.NewComponent(tlsProtocolName, "") // IsTLSEnabled searches for wrapped TLS protocol in multiaddr. func (a Address) IsTLSEnabled() bool { diff --git a/pool/client.go b/pool/client.go new file mode 100644 index 00000000..78650723 --- /dev/null +++ b/pool/client.go @@ -0,0 +1,1303 @@ +package pool + +import ( + "bytes" + "context" + "crypto/ecdsa" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + sdkClient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" +) + +// errPoolClientUnhealthy is an error to indicate that client in pool is unhealthy. +var errPoolClientUnhealthy = errors.New("pool client unhealthy") + +// clientStatusMonitor count error rate and other statistics for connection. +type clientStatusMonitor struct { + logger *zap.Logger + addr string + healthy *atomic.Uint32 + errorThreshold uint32 + + mu sync.RWMutex // protect counters + currentErrorCount uint32 + overallErrorCount uint64 + methods []*MethodStatus +} + +// values for healthy status of clientStatusMonitor. +const ( + // statusUnhealthyOnRequest is set when communication after dialing to the + // endpoint is failed due to immediate or accumulated errors, connection is + // available and pool should close it before re-establishing connection once again. + statusUnhealthyOnRequest = iota + + // statusHealthy is set when connection is ready to be used by the pool. + statusHealthy +) + +// MethodIndex index of method in list of statuses in clientStatusMonitor. 
+type MethodIndex int + +const ( + methodBalanceGet MethodIndex = iota + methodContainerPut + methodContainerGet + methodContainerList + methodContainerListStream + methodContainerDelete + methodEndpointInfo + methodNetworkInfo + methodNetMapSnapshot + methodObjectPut + methodObjectDelete + methodObjectGet + methodObjectHead + methodObjectRange + methodObjectPatch + methodSessionCreate + methodAPEManagerAddChain + methodAPEManagerRemoveChain + methodAPEManagerListChains + methodLast +) + +// String implements fmt.Stringer. +func (m MethodIndex) String() string { + switch m { + case methodBalanceGet: + return "balanceGet" + case methodContainerPut: + return "containerPut" + case methodContainerGet: + return "containerGet" + case methodContainerList: + return "containerList" + case methodContainerListStream: + return "containerListStream" + case methodContainerDelete: + return "containerDelete" + case methodEndpointInfo: + return "endpointInfo" + case methodNetworkInfo: + return "networkInfo" + case methodNetMapSnapshot: + return "netMapSnapshot" + case methodObjectPut: + return "objectPut" + case methodObjectPatch: + return "objectPatch" + case methodObjectDelete: + return "objectDelete" + case methodObjectGet: + return "objectGet" + case methodObjectHead: + return "objectHead" + case methodObjectRange: + return "objectRange" + case methodSessionCreate: + return "sessionCreate" + case methodAPEManagerAddChain: + return "apeManagerAddChain" + case methodAPEManagerRemoveChain: + return "apeManagerRemoveChain" + case methodAPEManagerListChains: + return "apeManagerListChains" + case methodLast: + return "it's a system name rather than a method" + default: + return "unknown" + } +} + +func newClientStatusMonitor(logger *zap.Logger, addr string, errorThreshold uint32) clientStatusMonitor { + methods := make([]*MethodStatus, methodLast) + for i := methodBalanceGet; i < methodLast; i++ { + methods[i] = &MethodStatus{name: i.String()} + } + + healthy := new(atomic.Uint32) + healthy.Store(statusHealthy) + + return clientStatusMonitor{ + logger: logger, + addr: addr, + healthy: healthy, + errorThreshold: errorThreshold, + methods: methods, + } +} + +// clientWrapper is used by default, alternative implementations are intended for testing purposes only. +type clientWrapper struct { + clientMutex sync.RWMutex + client *sdkClient.Client + dialed bool + prm wrapperPrm + + clientStatusMonitor +} + +// wrapperPrm is params to create clientWrapper. +type wrapperPrm struct { + logger *zap.Logger + address string + key ecdsa.PrivateKey + dialTimeout time.Duration + streamTimeout time.Duration + errorThreshold uint32 + responseInfoCallback func(sdkClient.ResponseMetaInfo) error + poolRequestInfoCallback func(RequestInfo) + dialOptions []grpc.DialOption + + gracefulCloseOnSwitchTimeout time.Duration +} + +// setAddress sets endpoint to connect in FrostFS network. +func (x *wrapperPrm) setAddress(address string) { + x.address = address +} + +// setKey sets sdkClient.Client private key to be used for the protocol communication by default. +func (x *wrapperPrm) setKey(key ecdsa.PrivateKey) { + x.key = key +} + +// setLogger sets sdkClient.Client logger. +func (x *wrapperPrm) setLogger(logger *zap.Logger) { + x.logger = logger +} + +// setDialTimeout sets the timeout for connection to be established. +func (x *wrapperPrm) setDialTimeout(timeout time.Duration) { + x.dialTimeout = timeout +} + +// setStreamTimeout sets the timeout for individual operations in streaming RPC. 
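Inside the pool package these setters are combined roughly as follows; this is a sketch of in-package wiring rather than exported API, and the endpoint, timeouts and threshold values are placeholders:

package pool

import (
	"context"
	"crypto/ecdsa"
	"time"

	"go.uber.org/zap"
)

// newDialedWrapper is a hypothetical in-package helper showing how the setters compose.
func newDialedWrapper(ctx context.Context, key ecdsa.PrivateKey, logger *zap.Logger) (*clientWrapper, error) {
	var prm wrapperPrm
	prm.setAddress("grpc://127.0.0.1:8080") // placeholder endpoint
	prm.setKey(key)
	prm.setLogger(logger)
	prm.setDialTimeout(5 * time.Second)
	prm.setStreamTimeout(10 * time.Second)
	prm.setErrorThreshold(100) // errors tolerated before the connection is marked unhealthy

	cw := newWrapper(prm)
	if err := cw.dial(ctx); err != nil {
		return nil, err
	}

	return cw, nil
}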
+func (x *wrapperPrm) setStreamTimeout(timeout time.Duration) { + x.streamTimeout = timeout +} + +// setErrorThreshold sets threshold after reaching which connection is considered unhealthy +// until Pool.startRebalance routing updates its status. +func (x *wrapperPrm) setErrorThreshold(threshold uint32) { + x.errorThreshold = threshold +} + +// setGracefulCloseOnSwitchTimeout specifies the timeout after which unhealthy client be closed during rebalancing +// if it will become healthy back. +// +// See also setErrorThreshold. +func (x *wrapperPrm) setGracefulCloseOnSwitchTimeout(timeout time.Duration) { + x.gracefulCloseOnSwitchTimeout = timeout +} + +// setPoolRequestCallback sets callback that will be invoked after every pool response. +func (x *wrapperPrm) setPoolRequestCallback(f func(RequestInfo)) { + x.poolRequestInfoCallback = f +} + +// setResponseInfoCallback sets callback that will be invoked after every response. +func (x *wrapperPrm) setResponseInfoCallback(f func(sdkClient.ResponseMetaInfo) error) { + x.responseInfoCallback = f +} + +// setGRPCDialOptions sets the gRPC dial options for new gRPC client connection. +func (x *wrapperPrm) setGRPCDialOptions(opts []grpc.DialOption) { + x.dialOptions = opts +} + +// newWrapper creates a clientWrapper that implements the client interface. +func newWrapper(prm wrapperPrm) *clientWrapper { + var cl sdkClient.Client + prmInit := sdkClient.PrmInit{ + Key: prm.key, + ResponseInfoCallback: prm.responseInfoCallback, + } + + cl.Init(prmInit) + + res := &clientWrapper{ + client: &cl, + clientStatusMonitor: newClientStatusMonitor(prm.logger, prm.address, prm.errorThreshold), + prm: prm, + } + + return res +} + +// dial establishes a connection to the server from the FrostFS network. +// Returns an error describing failure reason. If failed, the client +// SHOULD NOT be used. +func (c *clientWrapper) dial(ctx context.Context) error { + cl, err := c.getClient() + if err != nil { + return err + } + + prmDial := sdkClient.PrmDial{ + Endpoint: c.prm.address, + DialTimeout: c.prm.dialTimeout, + StreamTimeout: c.prm.streamTimeout, + GRPCDialOptions: c.prm.dialOptions, + } + + err = cl.Dial(ctx, prmDial) + c.setDialed(err == nil) + if err != nil { + return err + } + + return nil +} + +// restart recreates and redial inner sdk client. +func (c *clientWrapper) restart(ctx context.Context) error { + var cl sdkClient.Client + prmInit := sdkClient.PrmInit{ + Key: c.prm.key, + ResponseInfoCallback: c.prm.responseInfoCallback, + } + + cl.Init(prmInit) + + prmDial := sdkClient.PrmDial{ + Endpoint: c.prm.address, + DialTimeout: c.prm.dialTimeout, + StreamTimeout: c.prm.streamTimeout, + GRPCDialOptions: c.prm.dialOptions, + } + + // if connection is dialed before, to avoid routine / connection leak, + // pool has to close it and then initialize once again. 
+ if c.isDialed() { + c.scheduleGracefulClose() + } + + err := cl.Dial(ctx, prmDial) + c.setDialed(err == nil) + if err != nil { + return err + } + + c.clientMutex.Lock() + c.client = &cl + c.clientMutex.Unlock() + + return nil +} + +func (c *clientWrapper) isDialed() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.dialed +} + +func (c *clientWrapper) setDialed(dialed bool) { + c.mu.Lock() + c.dialed = dialed + c.mu.Unlock() +} + +func (c *clientWrapper) getClient() (*sdkClient.Client, error) { + c.clientMutex.RLock() + defer c.clientMutex.RUnlock() + if c.isHealthy() { + return c.client, nil + } + return nil, errPoolClientUnhealthy +} + +func (c *clientWrapper) getClientRaw() *sdkClient.Client { + c.clientMutex.RLock() + defer c.clientMutex.RUnlock() + return c.client +} + +// balanceGet invokes sdkClient.BalanceGet parse response status to error and return result as is. +func (c *clientWrapper) balanceGet(ctx context.Context, prm PrmBalanceGet) (accounting.Decimal, error) { + cl, err := c.getClient() + if err != nil { + return accounting.Decimal{}, err + } + + cliPrm := sdkClient.PrmBalanceGet{ + Account: prm.account, + } + + start := time.Now() + res, err := cl.BalanceGet(ctx, cliPrm) + c.incRequests(time.Since(start), methodBalanceGet) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return accounting.Decimal{}, fmt.Errorf("balance get on client: %w", err) + } + + return res.Amount(), nil +} + +// containerPut invokes sdkClient.ContainerPut parse response status to error and return result as is. +// It also waits for the container to appear on the network. +func (c *clientWrapper) containerPut(ctx context.Context, prm PrmContainerPut) (cid.ID, error) { + cl, err := c.getClient() + if err != nil { + return cid.ID{}, err + } + + start := time.Now() + res, err := cl.ContainerPut(ctx, prm.ClientParams) + c.incRequests(time.Since(start), methodContainerPut) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return cid.ID{}, fmt.Errorf("container put on client: %w", err) + } + + if prm.WaitParams == nil { + prm.WaitParams = defaultWaitParams() + } + if err = prm.WaitParams.CheckValidity(); err != nil { + return cid.ID{}, fmt.Errorf("invalid wait parameters: %w", err) + } + + idCnr := res.ID() + + getPrm := PrmContainerGet{ + ContainerID: idCnr, + Session: prm.ClientParams.Session, + } + + err = waitForContainerPresence(ctx, c, getPrm, prm.WaitParams) + if err = c.handleError(ctx, nil, err); err != nil { + return cid.ID{}, fmt.Errorf("wait container presence on client: %w", err) + } + + return idCnr, nil +} + +// containerGet invokes sdkClient.ContainerGet parse response status to error and return result as is. 
+func (c *clientWrapper) containerGet(ctx context.Context, prm PrmContainerGet) (container.Container, error) { + cl, err := c.getClient() + if err != nil { + return container.Container{}, err + } + + cliPrm := sdkClient.PrmContainerGet{ + ContainerID: &prm.ContainerID, + Session: prm.Session, + } + + start := time.Now() + res, err := cl.ContainerGet(ctx, cliPrm) + c.incRequests(time.Since(start), methodContainerGet) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return container.Container{}, fmt.Errorf("container get on client: %w", err) + } + + return res.Container(), nil +} + +// containerList invokes sdkClient.ContainerList parse response status to error and return result as is. +func (c *clientWrapper) containerList(ctx context.Context, prm PrmContainerList) ([]cid.ID, error) { + cl, err := c.getClient() + if err != nil { + return nil, err + } + + cliPrm := sdkClient.PrmContainerList{ + OwnerID: prm.OwnerID, + Session: prm.Session, + } + + start := time.Now() + res, err := cl.ContainerList(ctx, cliPrm) + c.incRequests(time.Since(start), methodContainerList) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return nil, fmt.Errorf("container list on client: %w", err) + } + return res.Containers(), nil +} + +// PrmListStream groups parameters of ListContainersStream operation. +type PrmListStream struct { + OwnerID user.ID + + Session *session.Container +} + +// ResListStream is designed to read list of object identifiers from FrostFS system. +// +// Must be initialized using Pool.ListContainersStream, any other usage is unsafe. +type ResListStream struct { + r *sdkClient.ContainerListReader + elapsedTimeCallback func(time.Duration) + handleError func(context.Context, apistatus.Status, error) error +} + +// Read reads another list of the container identifiers. +func (x *ResListStream) Read(buf []cid.ID) (int, error) { + start := time.Now() + n, ok := x.r.Read(buf) + x.elapsedTimeCallback(time.Since(start)) + if !ok { + res, err := x.r.Close() + if err == nil { + return n, io.EOF + } + + var status apistatus.Status + if res != nil { + status = res.Status() + } + err = x.handleError(nil, status, err) + + return n, err + } + + return n, nil +} + +// Iterate iterates over the list of found container identifiers. +// f can return true to stop iteration earlier. +// +// Returns an error if container can't be read. +func (x *ResListStream) Iterate(f func(cid.ID) bool) error { + start := time.Now() + err := x.r.Iterate(func(id cid.ID) bool { + x.elapsedTimeCallback(time.Since(start)) + stop := f(id) + start = time.Now() + return stop + }) + return err +} + +// Close ends reading list of the matched containers and returns the result of the operation +// along with the final results. Must be called after using the ResListStream. +func (x *ResListStream) Close() { + _, _ = x.r.Close() +} + +// containerList invokes sdkClient.ContainerList parse response status to error and return result as is. 
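A consumption sketch for the streaming container listing. It assumes the pool-level entry point is exposed as Pool.ListContainersStream returning (ResListStream, error), which is referenced in the ResListStream doc comment but not shown in this diff:

package example

import (
	"context"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// listAllContainers drains the stream into a slice using Iterate.
func listAllContainers(ctx context.Context, p *pool.Pool, owner user.ID) ([]cid.ID, error) {
	res, err := p.ListContainersStream(ctx, pool.PrmListStream{OwnerID: owner}) // assumed signature
	if err != nil {
		return nil, err
	}
	defer res.Close()

	var ids []cid.ID
	err = res.Iterate(func(id cid.ID) bool {
		ids = append(ids, id)
		return false // false keeps the iteration going
	})

	return ids, err
}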
+func (c *clientWrapper) containerListStream(ctx context.Context, prm PrmListStream) (ResListStream, error) { + cl, err := c.getClient() + if err != nil { + return ResListStream{}, err + } + + cliPrm := sdkClient.PrmContainerListStream{ + OwnerID: prm.OwnerID, + Session: prm.Session, + } + + start := time.Now() + cnrRdr, err := cl.ContainerListInit(ctx, cliPrm) + c.incRequests(time.Since(start), methodContainerListStream) + if err = c.handleError(ctx, nil, err); err != nil { + return ResListStream{}, fmt.Errorf("init container listing on client: %w", err) + } + return ResListStream{ + r: cnrRdr, + elapsedTimeCallback: func(elapsed time.Duration) { + c.incRequests(elapsed, methodContainerListStream) + }, + handleError: c.handleError, + }, nil +} + +// containerDelete invokes sdkClient.ContainerDelete parse response status to error. +// It also waits for the container to be removed from the network. +func (c *clientWrapper) containerDelete(ctx context.Context, prm PrmContainerDelete) error { + cl, err := c.getClient() + if err != nil { + return err + } + + cliPrm := sdkClient.PrmContainerDelete{ + ContainerID: &prm.ContainerID, + Session: prm.Session, + } + + start := time.Now() + res, err := cl.ContainerDelete(ctx, cliPrm) + c.incRequests(time.Since(start), methodContainerDelete) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return fmt.Errorf("container delete on client: %w", err) + } + + if prm.WaitParams == nil { + prm.WaitParams = defaultWaitParams() + } + if err := prm.WaitParams.CheckValidity(); err != nil { + return fmt.Errorf("invalid wait parameters: %w", err) + } + + getPrm := PrmContainerGet{ + ContainerID: prm.ContainerID, + Session: prm.Session, + } + + return waitForContainerRemoved(ctx, c, getPrm, prm.WaitParams) +} + +// apeManagerAddChain invokes sdkClient.APEManagerAddChain and parse response status to error. +func (c *clientWrapper) apeManagerAddChain(ctx context.Context, prm PrmAddAPEChain) error { + cl, err := c.getClient() + if err != nil { + return err + } + + cliPrm := sdkClient.PrmAPEManagerAddChain{ + ChainTarget: prm.Target, + Chain: prm.Chain, + } + + start := time.Now() + res, err := cl.APEManagerAddChain(ctx, cliPrm) + c.incRequests(time.Since(start), methodAPEManagerAddChain) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return fmt.Errorf("add chain error: %w", err) + } + + return nil +} + +// apeManagerRemoveChain invokes sdkClient.APEManagerRemoveChain and parse response status to error. +func (c *clientWrapper) apeManagerRemoveChain(ctx context.Context, prm PrmRemoveAPEChain) error { + cl, err := c.getClient() + if err != nil { + return err + } + + cliPrm := sdkClient.PrmAPEManagerRemoveChain{ + ChainTarget: prm.Target, + ChainID: prm.ChainID, + } + + start := time.Now() + res, err := cl.APEManagerRemoveChain(ctx, cliPrm) + c.incRequests(time.Since(start), methodAPEManagerRemoveChain) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return fmt.Errorf("remove chain error: %w", err) + } + + return nil +} + +// apeManagerListChains invokes sdkClient.APEManagerListChains. Returns chains and parsed response status to error. 
+func (c *clientWrapper) apeManagerListChains(ctx context.Context, prm PrmListAPEChains) ([]ape.Chain, error) { + cl, err := c.getClient() + if err != nil { + return nil, err + } + + cliPrm := sdkClient.PrmAPEManagerListChains{ + ChainTarget: prm.Target, + } + + start := time.Now() + res, err := cl.APEManagerListChains(ctx, cliPrm) + c.incRequests(time.Since(start), methodAPEManagerListChains) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return nil, fmt.Errorf("list chains error: %w", err) + } + + return res.Chains, nil +} + +// endpointInfo invokes sdkClient.EndpointInfo parse response status to error and return result as is. +func (c *clientWrapper) endpointInfo(ctx context.Context, _ prmEndpointInfo) (netmap.NodeInfo, error) { + cl, err := c.getClient() + if err != nil { + return netmap.NodeInfo{}, err + } + + return c.endpointInfoRaw(ctx, cl) +} + +func (c *clientWrapper) healthcheck(ctx context.Context) (netmap.NodeInfo, error) { + cl := c.getClientRaw() + return c.endpointInfoRaw(ctx, cl) +} + +func (c *clientWrapper) endpointInfoRaw(ctx context.Context, cl *sdkClient.Client) (netmap.NodeInfo, error) { + start := time.Now() + res, err := cl.EndpointInfo(ctx, sdkClient.PrmEndpointInfo{}) + c.incRequests(time.Since(start), methodEndpointInfo) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return netmap.NodeInfo{}, fmt.Errorf("endpoint info on client: %w", err) + } + + return res.NodeInfo(), nil +} + +// networkInfo invokes sdkClient.NetworkInfo parse response status to error and return result as is. +func (c *clientWrapper) networkInfo(ctx context.Context, _ prmNetworkInfo) (netmap.NetworkInfo, error) { + cl, err := c.getClient() + if err != nil { + return netmap.NetworkInfo{}, err + } + + start := time.Now() + res, err := cl.NetworkInfo(ctx, sdkClient.PrmNetworkInfo{}) + c.incRequests(time.Since(start), methodNetworkInfo) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return netmap.NetworkInfo{}, fmt.Errorf("network info on client: %w", err) + } + + return res.Info(), nil +} + +// networkInfo invokes sdkClient.NetworkInfo parse response status to error and return result as is. +func (c *clientWrapper) netMapSnapshot(ctx context.Context, _ prmNetMapSnapshot) (netmap.NetMap, error) { + cl, err := c.getClient() + if err != nil { + return netmap.NetMap{}, err + } + + start := time.Now() + res, err := cl.NetMapSnapshot(ctx, sdkClient.PrmNetMapSnapshot{}) + c.incRequests(time.Since(start), methodNetMapSnapshot) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return netmap.NetMap{}, fmt.Errorf("network map snapshot on client: %w", err) + } + + return res.NetMap(), nil +} + +// objectPatch patches object in FrostFS. 
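For reference, the client-level sequence that objectPatch drives looks roughly like this; cl is an initialized *sdkClient.Client, and the address, attribute, range and payload parameter types are assumptions inferred from the wrapper below:

package example

import (
	"context"
	"io"

	sdkClient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// patchDirect shows the raw ObjectPatchInit / PatchAttributes / PatchPayload / Close flow.
func patchDirect(ctx context.Context, cl *sdkClient.Client, addr oid.Address, attrs []objectSDK.Attribute, rng *objectSDK.Range, payload io.Reader) (oid.ID, error) {
	pObj, err := cl.ObjectPatchInit(ctx, sdkClient.PrmObjectPatch{
		Address:        addr,
		MaxChunkLength: 1 << 20, // placeholder chunk size
	})
	if err != nil {
		return oid.ID{}, err
	}

	if pObj.PatchAttributes(ctx, attrs, false) {
		_ = pObj.PatchPayload(ctx, rng, payload)
	}

	res, err := pObj.Close(ctx)
	if err != nil {
		return oid.ID{}, err
	}

	return res.ObjectID(), nil
}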
+func (c *clientWrapper) objectPatch(ctx context.Context, prm PrmObjectPatch) (ResPatchObject, error) { + cl, err := c.getClient() + if err != nil { + return ResPatchObject{}, err + } + + start := time.Now() + pObj, err := cl.ObjectPatchInit(ctx, sdkClient.PrmObjectPatch{ + Address: prm.addr, + Session: prm.stoken, + Key: prm.key, + BearerToken: prm.btoken, + MaxChunkLength: prm.maxPayloadPatchChunkLength, + }) + if err = c.handleError(ctx, nil, err); err != nil { + return ResPatchObject{}, fmt.Errorf("init patching on API client: %w", err) + } + c.incRequests(time.Since(start), methodObjectPatch) + + start = time.Now() + attrPatchSuccess := pObj.PatchAttributes(ctx, prm.newAttrs, prm.replaceAttrs) + c.incRequests(time.Since(start), methodObjectPatch) + + if attrPatchSuccess { + start = time.Now() + _ = pObj.PatchPayload(ctx, prm.rng, prm.payload) + c.incRequests(time.Since(start), methodObjectPatch) + } + + res, err := pObj.Close(ctx) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return ResPatchObject{}, fmt.Errorf("client failure: %w", err) + } + + return ResPatchObject{ObjectID: res.ObjectID()}, nil +} + +// objectPut writes object to FrostFS. +func (c *clientWrapper) objectPut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { + if prm.bufferMaxSize == 0 { + prm.bufferMaxSize = defaultBufferMaxSizeForPut + } + + if prm.clientCut { + return c.objectPutClientCut(ctx, prm) + } + + return c.objectPutServerCut(ctx, prm) +} + +func (c *clientWrapper) objectPutServerCut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { + cl, err := c.getClient() + if err != nil { + return ResPutObject{}, err + } + + cliPrm := sdkClient.PrmObjectPutInit{ + CopiesNumber: prm.copiesNumber, + Session: prm.stoken, + Key: prm.key, + BearerToken: prm.btoken, + } + + start := time.Now() + wObj, err := cl.ObjectPutInit(ctx, cliPrm) + c.incRequests(time.Since(start), methodObjectPut) + if err = c.handleError(ctx, nil, err); err != nil { + return ResPutObject{}, fmt.Errorf("init writing on API client: %w", err) + } + + if wObj.WriteHeader(ctx, prm.hdr) { + sz := prm.hdr.PayloadSize() + + if data := prm.hdr.Payload(); len(data) > 0 { + if prm.payload != nil { + prm.payload = io.MultiReader(bytes.NewReader(data), prm.payload) + } else { + prm.payload = bytes.NewReader(data) + sz = uint64(len(data)) + } + } + + if prm.payload != nil { + if sz == 0 || sz > prm.bufferMaxSize { + sz = prm.bufferMaxSize + } + + buf := make([]byte, sz) + + var n int + + for { + n, err = prm.payload.Read(buf) + if n > 0 { + start = time.Now() + successWrite := wObj.WritePayloadChunk(ctx, buf[:n]) + c.incRequests(time.Since(start), methodObjectPut) + if !successWrite { + break + } + + continue + } + + if errors.Is(err, io.EOF) { + break + } + + return ResPutObject{}, fmt.Errorf("read payload: %w", c.handleError(ctx, nil, err)) + } + } + } + + res, err := wObj.Close(ctx) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { // here err already carries both status and client errors + return ResPutObject{}, fmt.Errorf("client failure: %w", err) + } + + return ResPutObject{ + ObjectID: res.StoredObjectID(), + Epoch: res.StoredEpoch(), + }, nil +} + +func (c *clientWrapper) objectPutClientCut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { + putInitPrm := PrmObjectPutClientCutInit{ + PrmObjectPut: prm, + } + + start := time.Now() + wObj, err := 
c.objectPutInitTransformer(putInitPrm) + c.incRequests(time.Since(start), methodObjectPut) + if err = c.handleError(ctx, nil, err); err != nil { + return ResPutObject{}, fmt.Errorf("init writing on API client: %w", err) + } + + if wObj.WriteHeader(ctx, prm.hdr) { + sz := prm.hdr.PayloadSize() + + if data := prm.hdr.Payload(); len(data) > 0 { + if prm.payload != nil { + prm.payload = io.MultiReader(bytes.NewReader(data), prm.payload) + } else { + prm.payload = bytes.NewReader(data) + sz = uint64(len(data)) + } + } + + if prm.payload != nil { + if sz == 0 || sz > prm.bufferMaxSize { + sz = prm.bufferMaxSize + } + + buf := make([]byte, sz) + + var n int + + for { + n, err = prm.payload.Read(buf) + if n > 0 { + start = time.Now() + successWrite := wObj.WritePayloadChunk(ctx, buf[:n]) + c.incRequests(time.Since(start), methodObjectPut) + if !successWrite { + break + } + + continue + } + + if errors.Is(err, io.EOF) { + break + } + + return ResPutObject{}, fmt.Errorf("read payload: %w", c.handleError(ctx, nil, err)) + } + } + } + + res, err := wObj.Close(ctx) + var st apistatus.Status + if res != nil { + st = res.Status + } + if err = c.handleError(ctx, st, err); err != nil { // here err already carries both status and client errors + return ResPutObject{}, fmt.Errorf("client failure: %w", err) + } + + return ResPutObject{ + ObjectID: res.OID, + Epoch: res.Epoch, + }, nil +} + +// objectDelete invokes sdkClient.ObjectDelete parse response status to error. +func (c *clientWrapper) objectDelete(ctx context.Context, prm PrmObjectDelete) error { + cl, err := c.getClient() + if err != nil { + return err + } + + cnr := prm.addr.Container() + obj := prm.addr.Object() + + cliPrm := sdkClient.PrmObjectDelete{ + BearerToken: prm.btoken, + Session: prm.stoken, + ContainerID: &cnr, + ObjectID: &obj, + Key: prm.key, + } + + start := time.Now() + res, err := cl.ObjectDelete(ctx, cliPrm) + c.incRequests(time.Since(start), methodObjectDelete) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return fmt.Errorf("delete object on client: %w", err) + } + return nil +} + +// objectGet returns reader for object. +func (c *clientWrapper) objectGet(ctx context.Context, prm PrmObjectGet) (ResGetObject, error) { + cl, err := c.getClient() + if err != nil { + return ResGetObject{}, err + } + + prmCnr := prm.addr.Container() + prmObj := prm.addr.Object() + + cliPrm := sdkClient.PrmObjectGet{ + BearerToken: prm.btoken, + Session: prm.stoken, + ContainerID: &prmCnr, + ObjectID: &prmObj, + Key: prm.key, + } + + var res ResGetObject + + rObj, err := cl.ObjectGetInit(ctx, cliPrm) + if err = c.handleError(ctx, nil, err); err != nil { + return ResGetObject{}, fmt.Errorf("init object reading on client: %w", err) + } + + start := time.Now() + successReadHeader := rObj.ReadHeader(&res.Header) + c.incRequests(time.Since(start), methodObjectGet) + if !successReadHeader { + rObjRes, err := rObj.Close() + var st apistatus.Status + if rObjRes != nil { + st = rObjRes.Status() + } + err = c.handleError(ctx, st, err) + return res, fmt.Errorf("read header: %w", err) + } + + res.Payload = &objectReadCloser{ + reader: rObj, + elapsedTimeCallback: func(elapsed time.Duration) { + c.incRequests(elapsed, methodObjectGet) + }, + } + + return res, nil +} + +// objectHead invokes sdkClient.ObjectHead parse response status to error and return result as is. 
+func (c *clientWrapper) objectHead(ctx context.Context, prm PrmObjectHead) (object.Object, error) { + cl, err := c.getClient() + if err != nil { + return object.Object{}, err + } + + prmCnr := prm.addr.Container() + prmObj := prm.addr.Object() + + cliPrm := sdkClient.PrmObjectHead{ + BearerToken: prm.btoken, + Session: prm.stoken, + Raw: prm.raw, + ContainerID: &prmCnr, + ObjectID: &prmObj, + Key: prm.key, + } + + var obj object.Object + + start := time.Now() + res, err := cl.ObjectHead(ctx, cliPrm) + c.incRequests(time.Since(start), methodObjectHead) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return obj, fmt.Errorf("read object header via client: %w", err) + } + if !res.ReadHeader(&obj) { + return obj, errors.New("missing object header in response") + } + + return obj, nil +} + +// objectRange returns object range reader. +func (c *clientWrapper) objectRange(ctx context.Context, prm PrmObjectRange) (ResObjectRange, error) { + cl, err := c.getClient() + if err != nil { + return ResObjectRange{}, err + } + + prmCnr := prm.addr.Container() + prmObj := prm.addr.Object() + + cliPrm := sdkClient.PrmObjectRange{ + BearerToken: prm.btoken, + Session: prm.stoken, + ContainerID: &prmCnr, + ObjectID: &prmObj, + Offset: prm.off, + Length: prm.ln, + Key: prm.key, + } + + start := time.Now() + res, err := cl.ObjectRangeInit(ctx, cliPrm) + c.incRequests(time.Since(start), methodObjectRange) + if err = c.handleError(ctx, nil, err); err != nil { + return ResObjectRange{}, fmt.Errorf("init payload range reading on client: %w", err) + } + + return ResObjectRange{ + payload: res, + elapsedTimeCallback: func(elapsed time.Duration) { + c.incRequests(elapsed, methodObjectRange) + }, + }, nil +} + +// objectSearch invokes sdkClient.ObjectSearchInit parse response status to error and return result as is. +func (c *clientWrapper) objectSearch(ctx context.Context, prm PrmObjectSearch) (ResObjectSearch, error) { + cl, err := c.getClient() + if err != nil { + return ResObjectSearch{}, err + } + + cliPrm := sdkClient.PrmObjectSearch{ + ContainerID: &prm.cnrID, + Filters: prm.filters, + Session: prm.stoken, + BearerToken: prm.btoken, + Key: prm.key, + } + + res, err := cl.ObjectSearchInit(ctx, cliPrm) + if err = c.handleError(ctx, nil, err); err != nil { + return ResObjectSearch{}, fmt.Errorf("init object searching on client: %w", err) + } + + return ResObjectSearch{r: res, handleError: c.handleError}, nil +} + +// sessionCreate invokes sdkClient.SessionCreate parse response status to error and return result as is. 
+func (c *clientWrapper) sessionCreate(ctx context.Context, prm prmCreateSession) (resCreateSession, error) { + cl, err := c.getClient() + if err != nil { + return resCreateSession{}, err + } + + cliPrm := sdkClient.PrmSessionCreate{ + Expiration: prm.exp, + Key: &prm.key, + } + + start := time.Now() + res, err := cl.SessionCreate(ctx, cliPrm) + c.incRequests(time.Since(start), methodSessionCreate) + var st apistatus.Status + if res != nil { + st = res.Status() + } + if err = c.handleError(ctx, st, err); err != nil { + return resCreateSession{}, fmt.Errorf("session creation on client: %w", err) + } + + return resCreateSession{ + id: res.ID(), + sessionKey: res.PublicKey(), + }, nil +} + +func (c *clientStatusMonitor) isHealthy() bool { + return c.healthy.Load() == statusHealthy +} + +func (c *clientStatusMonitor) setHealthy() { + c.healthy.Store(statusHealthy) +} + +func (c *clientStatusMonitor) setUnhealthy() { + c.healthy.Store(statusUnhealthyOnRequest) +} + +func (c *clientStatusMonitor) address() string { + return c.addr +} + +func (c *clientStatusMonitor) incErrorRate() { + c.mu.Lock() + c.currentErrorCount++ + c.overallErrorCount++ + + thresholdReached := c.currentErrorCount >= c.errorThreshold + if thresholdReached { + c.setUnhealthy() + c.currentErrorCount = 0 + } + c.mu.Unlock() + + if thresholdReached { + c.log(zapcore.WarnLevel, "error threshold reached", + zap.String("address", c.addr), zap.Uint32("threshold", c.errorThreshold)) + } +} + +func (c *clientStatusMonitor) incErrorRateToUnhealthy(err error) { + c.mu.Lock() + c.currentErrorCount = 0 + c.overallErrorCount++ + c.setUnhealthy() + c.mu.Unlock() + + c.log(zapcore.WarnLevel, "explicitly mark node unhealthy", zap.String("address", c.addr), zap.Error(err)) +} + +func (c *clientStatusMonitor) log(level zapcore.Level, msg string, fields ...zap.Field) { + if c.logger == nil { + return + } + + c.logger.Log(level, msg, fields...) 
+} + +func (c *clientStatusMonitor) currentErrorRate() uint32 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentErrorCount +} + +func (c *clientStatusMonitor) overallErrorRate() uint64 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.overallErrorCount +} + +func (c *clientStatusMonitor) methodsStatus() []StatusSnapshot { + result := make([]StatusSnapshot, len(c.methods)) + for i, val := range c.methods { + result[i] = val.Snapshot() + } + + return result +} + +func (c *clientWrapper) incRequests(elapsed time.Duration, method MethodIndex) { + methodStat := c.methods[method] + methodStat.IncRequests(elapsed) + if c.prm.poolRequestInfoCallback != nil { + c.prm.poolRequestInfoCallback(RequestInfo{ + Address: c.prm.address, + Method: method, + Elapsed: elapsed, + }) + } +} + +func (c *clientWrapper) close() error { + if !c.isDialed() { + return nil + } + if cl := c.getClientRaw(); cl != nil { + return cl.Close() + } + return nil +} + +func (c *clientWrapper) scheduleGracefulClose() { + cl := c.getClientRaw() + if cl == nil { + return + } + + time.AfterFunc(c.prm.gracefulCloseOnSwitchTimeout, func() { + if err := cl.Close(); err != nil { + c.log(zap.DebugLevel, "close unhealthy client during rebalance", zap.String("address", c.address()), zap.Error(err)) + } + }) +} + +func (c *clientStatusMonitor) handleError(ctx context.Context, st apistatus.Status, err error) error { + if stErr := apistatus.ErrFromStatus(st); stErr != nil { + switch stErr.(type) { + case *apistatus.ServerInternal, + *apistatus.WrongMagicNumber, + *apistatus.SignatureVerification: + c.incErrorRate() + case *apistatus.NodeUnderMaintenance: + c.incErrorRateToUnhealthy(stErr) + } + + if err == nil { + err = stErr + } + + return err + } + + if err != nil { + if needCountError(ctx, err) { + if sdkClient.IsErrNodeUnderMaintenance(err) { + c.incErrorRateToUnhealthy(err) + } else { + c.incErrorRate() + } + } + + return err + } + + return nil +} + +func needCountError(ctx context.Context, err error) bool { + // non-status logic error that could be returned + // from the SDK client; should not be considered + // as a connection error + var siErr *object.SplitInfoError + if errors.As(err, &siErr) { + return false + } + var eiErr *object.ECInfoError + if errors.As(err, &eiErr) { + return false + } + + if ctx != nil && errors.Is(ctx.Err(), context.Canceled) { + return false + } + + return true +} + +// clientBuilder is a type alias of client constructors which open connection +// to the given endpoint. +type clientBuilder = func(endpoint string) client + +// RequestInfo groups info about pool request. +type RequestInfo struct { + Address string + Method MethodIndex + Elapsed time.Duration +} diff --git a/pool/connection_manager.go b/pool/connection_manager.go new file mode 100644 index 00000000..b142529a --- /dev/null +++ b/pool/connection_manager.go @@ -0,0 +1,330 @@ +package pool + +import ( + "context" + "errors" + "fmt" + "math/rand" + "sort" + "sync" + "sync/atomic" + "time" + + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type innerPool struct { + lock sync.RWMutex + sampler *sampler + clients []client +} + +type connectionManager struct { + innerPools []*innerPool + rebalanceParams rebalanceParameters + clientBuilder clientBuilder + logger *zap.Logger + healthChecker *healthCheck +} + +// newConnectionManager returns an instance of connectionManager configured according to the parameters. 
+// +// Before using connectionManager, you MUST call Dial. +func newConnectionManager(options InitParameters) (*connectionManager, error) { + if options.key == nil { + return nil, fmt.Errorf("missed required parameter 'Key'") + } + + nodesParams, err := adjustNodeParams(options.nodeParams) + if err != nil { + return nil, err + } + + manager := &connectionManager{ + logger: options.logger, + rebalanceParams: rebalanceParameters{ + nodesParams: nodesParams, + nodeRequestTimeout: options.healthcheckTimeout, + clientRebalanceInterval: options.clientRebalanceInterval, + sessionExpirationDuration: options.sessionExpirationDuration, + }, + clientBuilder: options.clientBuilder, + } + + return manager, nil +} + +func (cm *connectionManager) dial(ctx context.Context) error { + inner := make([]*innerPool, len(cm.rebalanceParams.nodesParams)) + var atLeastOneHealthy bool + + for i, params := range cm.rebalanceParams.nodesParams { + clients := make([]client, len(params.weights)) + for j, addr := range params.addresses { + clients[j] = cm.clientBuilder(addr) + if err := clients[j].dial(ctx); err != nil { + cm.log(zap.WarnLevel, "failed to build client", zap.String("address", addr), zap.Error(err)) + continue + } + atLeastOneHealthy = true + } + source := rand.NewSource(time.Now().UnixNano()) + sampl := newSampler(params.weights, source) + + inner[i] = &innerPool{ + sampler: sampl, + clients: clients, + } + } + + if !atLeastOneHealthy { + return fmt.Errorf("at least one node must be healthy") + } + + cm.innerPools = inner + + cm.healthChecker = newHealthCheck(cm.rebalanceParams.clientRebalanceInterval) + cm.healthChecker.startRebalance(ctx, cm.rebalance) + return nil +} + +func (cm *connectionManager) rebalance(ctx context.Context) { + buffers := make([][]float64, len(cm.rebalanceParams.nodesParams)) + for i, params := range cm.rebalanceParams.nodesParams { + buffers[i] = make([]float64, len(params.weights)) + } + + cm.updateNodesHealth(ctx, buffers) +} + +func (cm *connectionManager) log(level zapcore.Level, msg string, fields ...zap.Field) { + if cm.logger == nil { + return + } + + cm.logger.Log(level, msg, fields...) 
+} + +func adjustNodeParams(nodeParams []NodeParam) ([]*nodesParam, error) { + if len(nodeParams) == 0 { + return nil, errors.New("no FrostFS peers configured") + } + + nodesParamsMap := make(map[int]*nodesParam) + for _, param := range nodeParams { + nodes, ok := nodesParamsMap[param.priority] + if !ok { + nodes = &nodesParam{priority: param.priority} + } + nodes.addresses = append(nodes.addresses, param.address) + nodes.weights = append(nodes.weights, param.weight) + nodesParamsMap[param.priority] = nodes + } + + nodesParams := make([]*nodesParam, 0, len(nodesParamsMap)) + for _, nodes := range nodesParamsMap { + nodes.weights = adjustWeights(nodes.weights) + nodesParams = append(nodesParams, nodes) + } + + sort.Slice(nodesParams, func(i, j int) bool { + return nodesParams[i].priority < nodesParams[j].priority + }) + + return nodesParams, nil +} + +func (cm *connectionManager) updateNodesHealth(ctx context.Context, buffers [][]float64) { + wg := sync.WaitGroup{} + for i, inner := range cm.innerPools { + wg.Add(1) + + bufferWeights := buffers[i] + go func(i int, _ *innerPool) { + defer wg.Done() + cm.updateInnerNodesHealth(ctx, i, bufferWeights) + }(i, inner) + } + wg.Wait() +} + +func (cm *connectionManager) updateInnerNodesHealth(ctx context.Context, i int, bufferWeights []float64) { + if i > len(cm.innerPools)-1 { + return + } + pool := cm.innerPools[i] + options := cm.rebalanceParams + + healthyChanged := new(atomic.Bool) + wg := sync.WaitGroup{} + + for j, cli := range pool.clients { + wg.Add(1) + go func(j int, cli client) { + defer wg.Done() + + tctx, c := context.WithTimeout(ctx, options.nodeRequestTimeout) + defer c() + + changed, err := restartIfUnhealthy(tctx, cli) + healthy := err == nil + if healthy { + bufferWeights[j] = options.nodesParams[i].weights[j] + } else { + bufferWeights[j] = 0 + } + + if changed { + fields := []zap.Field{zap.String("address", cli.address()), zap.Bool("healthy", healthy)} + if err != nil { + fields = append(fields, zap.String("reason", err.Error())) + } + + cm.log(zap.DebugLevel, "health has changed", fields...) + healthyChanged.Store(true) + } + }(j, cli) + } + wg.Wait() + + if healthyChanged.Load() { + probabilities := adjustWeights(bufferWeights) + source := rand.NewSource(time.Now().UnixNano()) + pool.lock.Lock() + pool.sampler = newSampler(probabilities, source) + pool.lock.Unlock() + } +} + +// restartIfUnhealthy checks healthy status of client and recreate it if status is unhealthy. +// Indicating if status was changed by this function call and returns error that caused unhealthy status. 
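// Illustrative sketch, not part of this patch: how adjustNodeParams groups
// NodeParam entries by priority and how adjustWeights normalizes each group's
// weights. The literals below use the package-internal fields (priority,
// address, weight) purely for illustration, the way an in-package test would.
func exampleAdjustNodeParams() {
	params := []NodeParam{
		{priority: 1, address: "grpc://node1:8080", weight: 1},
		{priority: 1, address: "grpc://node2:8080", weight: 3},
		{priority: 2, address: "grpc://node3:8080", weight: 1},
	}

	groups, _ := adjustNodeParams(params)

	// groups[0]: priority 1, addresses [node1 node2], weights [0.25 0.75]
	// groups[1]: priority 2, addresses [node3],       weights [1]
	_ = groups
}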
+func restartIfUnhealthy(ctx context.Context, c client) (changed bool, err error) { + defer func() { + if err != nil { + c.setUnhealthy() + } else { + c.setHealthy() + } + }() + + wasHealthy := c.isHealthy() + + if res, err := c.healthcheck(ctx); err == nil { + if res.Status().IsMaintenance() { + return wasHealthy, new(apistatus.NodeUnderMaintenance) + } + + return !wasHealthy, nil + } + + if err = c.restart(ctx); err != nil { + return wasHealthy, err + } + + res, err := c.healthcheck(ctx) + if err != nil { + return wasHealthy, err + } + + if res.Status().IsMaintenance() { + return wasHealthy, new(apistatus.NodeUnderMaintenance) + } + + return !wasHealthy, nil +} + +func adjustWeights(weights []float64) []float64 { + adjusted := make([]float64, len(weights)) + sum := 0.0 + for _, weight := range weights { + sum += weight + } + if sum > 0 { + for i, weight := range weights { + adjusted[i] = weight / sum + } + } + + return adjusted +} + +func (cm *connectionManager) connection() (client, error) { + for _, inner := range cm.innerPools { + cp, err := inner.connection() + if err == nil { + return cp, nil + } + } + + return nil, errors.New("no healthy client") +} + +// iterate iterates over all clients in all innerPools. +func (cm *connectionManager) iterate(cb func(client)) { + for _, inner := range cm.innerPools { + for _, cl := range inner.clients { + if cl.isHealthy() { + cb(cl) + } + } + } +} + +func (p *innerPool) connection() (client, error) { + p.lock.RLock() // need lock because of using p.sampler + defer p.lock.RUnlock() + if len(p.clients) == 1 { + cp := p.clients[0] + if cp.isHealthy() { + return cp, nil + } + return nil, errors.New("no healthy client") + } + attempts := 3 * len(p.clients) + for range attempts { + i := p.sampler.Next() + if cp := p.clients[i]; cp.isHealthy() { + return cp, nil + } + } + + return nil, errors.New("no healthy client") +} + +func (cm connectionManager) Statistic() Statistic { + stat := Statistic{} + for _, inner := range cm.innerPools { + nodes := make([]string, 0, len(inner.clients)) + for _, cl := range inner.clients { + if cl.isHealthy() { + nodes = append(nodes, cl.address()) + } + node := NodeStatistic{ + address: cl.address(), + methods: cl.methodsStatus(), + overallErrors: cl.overallErrorRate(), + currentErrors: cl.currentErrorRate(), + } + stat.nodes = append(stat.nodes, node) + stat.overallErrors += node.overallErrors + } + if len(stat.currentNodes) == 0 { + stat.currentNodes = nodes + } + } + + return stat +} + +func (cm *connectionManager) close() { + cm.healthChecker.stopRebalance() + + // close all clients + for _, pools := range cm.innerPools { + for _, cli := range pools.clients { + _ = cli.close() + } + } +} diff --git a/pool/healthcheck.go b/pool/healthcheck.go new file mode 100644 index 00000000..2f5dec9e --- /dev/null +++ b/pool/healthcheck.go @@ -0,0 +1,47 @@ +package pool + +import ( + "context" + "time" +) + +type healthCheck struct { + cancel context.CancelFunc + closedCh chan struct{} + + clientRebalanceInterval time.Duration +} + +func newHealthCheck(clientRebalanceInterval time.Duration) *healthCheck { + var h healthCheck + h.clientRebalanceInterval = clientRebalanceInterval + h.closedCh = make(chan struct{}) + return &h +} + +// startRebalance runs loop to monitor connection healthy status. 
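// Illustrative sketch, not part of this patch: how connectionManager drives the
// rebalance loop through the healthCheck helper introduced in pool/healthcheck.go
// above. The interval value here is arbitrary.
func exampleHealthCheckLoop(ctx context.Context, cm *connectionManager) {
	hc := newHealthCheck(5 * time.Second)

	// The callback runs once per tick until the context is cancelled or
	// stopRebalance is called.
	hc.startRebalance(ctx, cm.rebalance)

	// ... serve requests ...

	// stopRebalance cancels the loop and waits for it to exit.
	hc.stopRebalance()
}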
+func (h *healthCheck) startRebalance(ctx context.Context, callback func(ctx context.Context)) { + ctx, cancel := context.WithCancel(ctx) + h.cancel = cancel + + go func() { + ticker := time.NewTicker(h.clientRebalanceInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + close(h.closedCh) + return + case <-ticker.C: + callback(ctx) + ticker.Reset(h.clientRebalanceInterval) + } + } + }() +} + +func (h *healthCheck) stopRebalance() { + h.cancel() + <-h.closedCh +} diff --git a/pool/object_put_pool_transformer.go b/pool/object_put_pool_transformer.go index e596aeb3..69559190 100644 --- a/pool/object_put_pool_transformer.go +++ b/pool/object_put_pool_transformer.go @@ -2,6 +2,7 @@ package pool import ( "context" + "fmt" sdkClient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -134,7 +135,7 @@ func (it *internalTarget) putAsStream(ctx context.Context, o *object.Object) err it.res.OID = res.StoredObjectID() it.res.Epoch = res.StoredEpoch() } - return err + return fmt.Errorf("put as stream '%s': %w", it.address, err) } func (it *internalTarget) tryPutSingle(ctx context.Context, o *object.Object) (bool, error) { @@ -151,7 +152,7 @@ func (it *internalTarget) tryPutSingle(ctx context.Context, o *object.Object) (b res, err := it.client.ObjectPutSingle(ctx, cliPrm) if err != nil && status.Code(err) == codes.Unimplemented { - return false, err + return false, fmt.Errorf("address '%s': %w", it.address, err) } if err == nil { @@ -166,5 +167,5 @@ func (it *internalTarget) tryPutSingle(ctx context.Context, o *object.Object) (b } return true, nil } - return true, err + return true, fmt.Errorf("try put single '%s': %w", it.address, err) } diff --git a/pool/pool.go b/pool/pool.go index 1f695773..53bd587d 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -1,17 +1,12 @@ package pool import ( - "bytes" "context" "crypto/ecdsa" "errors" "fmt" "io" "math" - "math/rand" - "sort" - "sync" - "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" @@ -31,7 +26,6 @@ import ( "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "google.golang.org/grpc" ) @@ -112,1262 +106,6 @@ type clientStatus interface { methodsStatus() []StatusSnapshot } -// errPoolClientUnhealthy is an error to indicate that client in pool is unhealthy. -var errPoolClientUnhealthy = errors.New("pool client unhealthy") - -// clientStatusMonitor count error rate and other statistics for connection. -type clientStatusMonitor struct { - logger *zap.Logger - addr string - healthy *atomic.Uint32 - errorThreshold uint32 - - mu sync.RWMutex // protect counters - currentErrorCount uint32 - overallErrorCount uint64 - methods []*MethodStatus -} - -// values for healthy status of clientStatusMonitor. -const ( - // statusUnhealthyOnRequest is set when communication after dialing to the - // endpoint is failed due to immediate or accumulated errors, connection is - // available and pool should close it before re-establishing connection once again. - statusUnhealthyOnRequest = iota - - // statusHealthy is set when connection is ready to be used by the pool. - statusHealthy -) - -// MethodIndex index of method in list of statuses in clientStatusMonitor. 
-type MethodIndex int - -const ( - methodBalanceGet MethodIndex = iota - methodContainerPut - methodContainerGet - methodContainerList - methodContainerListStream - methodContainerDelete - methodEndpointInfo - methodNetworkInfo - methodNetMapSnapshot - methodObjectPut - methodObjectDelete - methodObjectGet - methodObjectHead - methodObjectRange - methodObjectPatch - methodSessionCreate - methodAPEManagerAddChain - methodAPEManagerRemoveChain - methodAPEManagerListChains - methodLast -) - -// String implements fmt.Stringer. -func (m MethodIndex) String() string { - switch m { - case methodBalanceGet: - return "balanceGet" - case methodContainerPut: - return "containerPut" - case methodContainerGet: - return "containerGet" - case methodContainerList: - return "containerList" - case methodContainerDelete: - return "containerDelete" - case methodEndpointInfo: - return "endpointInfo" - case methodNetworkInfo: - return "networkInfo" - case methodNetMapSnapshot: - return "netMapSnapshot" - case methodObjectPut: - return "objectPut" - case methodObjectPatch: - return "objectPatch" - case methodObjectDelete: - return "objectDelete" - case methodObjectGet: - return "objectGet" - case methodObjectHead: - return "objectHead" - case methodObjectRange: - return "objectRange" - case methodSessionCreate: - return "sessionCreate" - case methodAPEManagerAddChain: - return "apeManagerAddChain" - case methodAPEManagerRemoveChain: - return "apeManagerRemoveChain" - case methodAPEManagerListChains: - return "apeManagerListChains" - case methodLast: - return "it's a system name rather than a method" - default: - return "unknown" - } -} - -func newClientStatusMonitor(logger *zap.Logger, addr string, errorThreshold uint32) clientStatusMonitor { - methods := make([]*MethodStatus, methodLast) - for i := methodBalanceGet; i < methodLast; i++ { - methods[i] = &MethodStatus{name: i.String()} - } - - healthy := new(atomic.Uint32) - healthy.Store(statusHealthy) - - return clientStatusMonitor{ - logger: logger, - addr: addr, - healthy: healthy, - errorThreshold: errorThreshold, - methods: methods, - } -} - -// clientWrapper is used by default, alternative implementations are intended for testing purposes only. -type clientWrapper struct { - clientMutex sync.RWMutex - client *sdkClient.Client - dialed bool - prm wrapperPrm - - clientStatusMonitor -} - -// wrapperPrm is params to create clientWrapper. -type wrapperPrm struct { - logger *zap.Logger - address string - key ecdsa.PrivateKey - dialTimeout time.Duration - streamTimeout time.Duration - errorThreshold uint32 - responseInfoCallback func(sdkClient.ResponseMetaInfo) error - poolRequestInfoCallback func(RequestInfo) - dialOptions []grpc.DialOption - - gracefulCloseOnSwitchTimeout time.Duration -} - -// setAddress sets endpoint to connect in FrostFS network. -func (x *wrapperPrm) setAddress(address string) { - x.address = address -} - -// setKey sets sdkClient.Client private key to be used for the protocol communication by default. -func (x *wrapperPrm) setKey(key ecdsa.PrivateKey) { - x.key = key -} - -// setLogger sets sdkClient.Client logger. -func (x *wrapperPrm) setLogger(logger *zap.Logger) { - x.logger = logger -} - -// setDialTimeout sets the timeout for connection to be established. -func (x *wrapperPrm) setDialTimeout(timeout time.Duration) { - x.dialTimeout = timeout -} - -// setStreamTimeout sets the timeout for individual operations in streaming RPC. 
-func (x *wrapperPrm) setStreamTimeout(timeout time.Duration) { - x.streamTimeout = timeout -} - -// setErrorThreshold sets threshold after reaching which connection is considered unhealthy -// until Pool.startRebalance routing updates its status. -func (x *wrapperPrm) setErrorThreshold(threshold uint32) { - x.errorThreshold = threshold -} - -// setGracefulCloseOnSwitchTimeout specifies the timeout after which unhealthy client be closed during rebalancing -// if it will become healthy back. -// -// See also setErrorThreshold. -func (x *wrapperPrm) setGracefulCloseOnSwitchTimeout(timeout time.Duration) { - x.gracefulCloseOnSwitchTimeout = timeout -} - -// setPoolRequestCallback sets callback that will be invoked after every pool response. -func (x *wrapperPrm) setPoolRequestCallback(f func(RequestInfo)) { - x.poolRequestInfoCallback = f -} - -// setResponseInfoCallback sets callback that will be invoked after every response. -func (x *wrapperPrm) setResponseInfoCallback(f func(sdkClient.ResponseMetaInfo) error) { - x.responseInfoCallback = f -} - -// setGRPCDialOptions sets the gRPC dial options for new gRPC client connection. -func (x *wrapperPrm) setGRPCDialOptions(opts []grpc.DialOption) { - x.dialOptions = opts -} - -// newWrapper creates a clientWrapper that implements the client interface. -func newWrapper(prm wrapperPrm) *clientWrapper { - var cl sdkClient.Client - prmInit := sdkClient.PrmInit{ - Key: prm.key, - ResponseInfoCallback: prm.responseInfoCallback, - } - - cl.Init(prmInit) - - res := &clientWrapper{ - client: &cl, - clientStatusMonitor: newClientStatusMonitor(prm.logger, prm.address, prm.errorThreshold), - prm: prm, - } - - return res -} - -// dial establishes a connection to the server from the FrostFS network. -// Returns an error describing failure reason. If failed, the client -// SHOULD NOT be used. -func (c *clientWrapper) dial(ctx context.Context) error { - cl, err := c.getClient() - if err != nil { - return err - } - - prmDial := sdkClient.PrmDial{ - Endpoint: c.prm.address, - DialTimeout: c.prm.dialTimeout, - StreamTimeout: c.prm.streamTimeout, - GRPCDialOptions: c.prm.dialOptions, - } - - err = cl.Dial(ctx, prmDial) - c.setDialed(err == nil) - if err != nil { - return err - } - - return nil -} - -// restart recreates and redial inner sdk client. -func (c *clientWrapper) restart(ctx context.Context) error { - var cl sdkClient.Client - prmInit := sdkClient.PrmInit{ - Key: c.prm.key, - ResponseInfoCallback: c.prm.responseInfoCallback, - } - - cl.Init(prmInit) - - prmDial := sdkClient.PrmDial{ - Endpoint: c.prm.address, - DialTimeout: c.prm.dialTimeout, - StreamTimeout: c.prm.streamTimeout, - GRPCDialOptions: c.prm.dialOptions, - } - - // if connection is dialed before, to avoid routine / connection leak, - // pool has to close it and then initialize once again. 
- if c.isDialed() { - c.scheduleGracefulClose() - } - - err := cl.Dial(ctx, prmDial) - c.setDialed(err == nil) - if err != nil { - return err - } - - c.clientMutex.Lock() - c.client = &cl - c.clientMutex.Unlock() - - return nil -} - -func (c *clientWrapper) isDialed() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.dialed -} - -func (c *clientWrapper) setDialed(dialed bool) { - c.mu.Lock() - c.dialed = dialed - c.mu.Unlock() -} - -func (c *clientWrapper) getClient() (*sdkClient.Client, error) { - c.clientMutex.RLock() - defer c.clientMutex.RUnlock() - if c.isHealthy() { - return c.client, nil - } - return nil, errPoolClientUnhealthy -} - -func (c *clientWrapper) getClientRaw() *sdkClient.Client { - c.clientMutex.RLock() - defer c.clientMutex.RUnlock() - return c.client -} - -// balanceGet invokes sdkClient.BalanceGet parse response status to error and return result as is. -func (c *clientWrapper) balanceGet(ctx context.Context, prm PrmBalanceGet) (accounting.Decimal, error) { - cl, err := c.getClient() - if err != nil { - return accounting.Decimal{}, err - } - - cliPrm := sdkClient.PrmBalanceGet{ - Account: prm.account, - } - - start := time.Now() - res, err := cl.BalanceGet(ctx, cliPrm) - c.incRequests(time.Since(start), methodBalanceGet) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return accounting.Decimal{}, fmt.Errorf("balance get on client: %w", err) - } - - return res.Amount(), nil -} - -// containerPut invokes sdkClient.ContainerPut parse response status to error and return result as is. -// It also waits for the container to appear on the network. -func (c *clientWrapper) containerPut(ctx context.Context, prm PrmContainerPut) (cid.ID, error) { - cl, err := c.getClient() - if err != nil { - return cid.ID{}, err - } - - start := time.Now() - res, err := cl.ContainerPut(ctx, prm.ClientParams) - c.incRequests(time.Since(start), methodContainerPut) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return cid.ID{}, fmt.Errorf("container put on client: %w", err) - } - - if prm.WaitParams == nil { - prm.WaitParams = defaultWaitParams() - } - if err = prm.WaitParams.CheckValidity(); err != nil { - return cid.ID{}, fmt.Errorf("invalid wait parameters: %w", err) - } - - idCnr := res.ID() - - getPrm := PrmContainerGet{ - ContainerID: idCnr, - Session: prm.ClientParams.Session, - } - - err = waitForContainerPresence(ctx, c, getPrm, prm.WaitParams) - if err = c.handleError(ctx, nil, err); err != nil { - return cid.ID{}, fmt.Errorf("wait container presence on client: %w", err) - } - - return idCnr, nil -} - -// containerGet invokes sdkClient.ContainerGet parse response status to error and return result as is. 
-func (c *clientWrapper) containerGet(ctx context.Context, prm PrmContainerGet) (container.Container, error) { - cl, err := c.getClient() - if err != nil { - return container.Container{}, err - } - - cliPrm := sdkClient.PrmContainerGet{ - ContainerID: &prm.ContainerID, - Session: prm.Session, - } - - start := time.Now() - res, err := cl.ContainerGet(ctx, cliPrm) - c.incRequests(time.Since(start), methodContainerGet) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return container.Container{}, fmt.Errorf("container get on client: %w", err) - } - - return res.Container(), nil -} - -// containerList invokes sdkClient.ContainerList parse response status to error and return result as is. -func (c *clientWrapper) containerList(ctx context.Context, prm PrmContainerList) ([]cid.ID, error) { - cl, err := c.getClient() - if err != nil { - return nil, err - } - - cliPrm := sdkClient.PrmContainerList{ - OwnerID: prm.OwnerID, - Session: prm.Session, - } - - start := time.Now() - res, err := cl.ContainerList(ctx, cliPrm) - c.incRequests(time.Since(start), methodContainerList) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return nil, fmt.Errorf("container list on client: %w", err) - } - return res.Containers(), nil -} - -// PrmListStream groups parameters of ListContainersStream operation. -type PrmListStream struct { - OwnerID user.ID - - Session *session.Container -} - -// ResListStream is designed to read list of object identifiers from FrostFS system. -// -// Must be initialized using Pool.ListContainersStream, any other usage is unsafe. -type ResListStream struct { - r *sdkClient.ContainerListReader - handleError func(context.Context, apistatus.Status, error) error -} - -// Read reads another list of the container identifiers. -func (x *ResListStream) Read(buf []cid.ID) (int, error) { - n, ok := x.r.Read(buf) - if !ok { - res, err := x.r.Close() - if err == nil { - return n, io.EOF - } - - var status apistatus.Status - if res != nil { - status = res.Status() - } - err = x.handleError(nil, status, err) - - return n, err - } - - return n, nil -} - -// Iterate iterates over the list of found container identifiers. -// f can return true to stop iteration earlier. -// -// Returns an error if container can't be read. -func (x *ResListStream) Iterate(f func(cid.ID) bool) error { - return x.r.Iterate(f) -} - -// Close ends reading list of the matched containers and returns the result of the operation -// along with the final results. Must be called after using the ResListStream. -func (x *ResListStream) Close() { - _, _ = x.r.Close() -} - -// containerList invokes sdkClient.ContainerList parse response status to error and return result as is. -func (c *clientWrapper) containerListStream(ctx context.Context, prm PrmListStream) (ResListStream, error) { - cl, err := c.getClient() - if err != nil { - return ResListStream{}, err - } - - cliPrm := sdkClient.PrmContainerListStream{ - OwnerID: prm.OwnerID, - Session: prm.Session, - } - - res, err := cl.ContainerListInit(ctx, cliPrm) - if err = c.handleError(ctx, nil, err); err != nil { - return ResListStream{}, fmt.Errorf("init container listing on client: %w", err) - } - return ResListStream{r: res, handleError: c.handleError}, nil -} - -// containerDelete invokes sdkClient.ContainerDelete parse response status to error. -// It also waits for the container to be removed from the network. 
-func (c *clientWrapper) containerDelete(ctx context.Context, prm PrmContainerDelete) error { - cl, err := c.getClient() - if err != nil { - return err - } - - cliPrm := sdkClient.PrmContainerDelete{ - ContainerID: &prm.ContainerID, - Session: prm.Session, - } - - start := time.Now() - res, err := cl.ContainerDelete(ctx, cliPrm) - c.incRequests(time.Since(start), methodContainerDelete) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return fmt.Errorf("container delete on client: %w", err) - } - - if prm.WaitParams == nil { - prm.WaitParams = defaultWaitParams() - } - if err := prm.WaitParams.CheckValidity(); err != nil { - return fmt.Errorf("invalid wait parameters: %w", err) - } - - getPrm := PrmContainerGet{ - ContainerID: prm.ContainerID, - Session: prm.Session, - } - - return waitForContainerRemoved(ctx, c, getPrm, prm.WaitParams) -} - -// apeManagerAddChain invokes sdkClient.APEManagerAddChain and parse response status to error. -func (c *clientWrapper) apeManagerAddChain(ctx context.Context, prm PrmAddAPEChain) error { - cl, err := c.getClient() - if err != nil { - return err - } - - cliPrm := sdkClient.PrmAPEManagerAddChain{ - ChainTarget: prm.Target, - Chain: prm.Chain, - } - - start := time.Now() - res, err := cl.APEManagerAddChain(ctx, cliPrm) - c.incRequests(time.Since(start), methodAPEManagerAddChain) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return fmt.Errorf("add chain error: %w", err) - } - - return nil -} - -// apeManagerRemoveChain invokes sdkClient.APEManagerRemoveChain and parse response status to error. -func (c *clientWrapper) apeManagerRemoveChain(ctx context.Context, prm PrmRemoveAPEChain) error { - cl, err := c.getClient() - if err != nil { - return err - } - - cliPrm := sdkClient.PrmAPEManagerRemoveChain{ - ChainTarget: prm.Target, - ChainID: prm.ChainID, - } - - start := time.Now() - res, err := cl.APEManagerRemoveChain(ctx, cliPrm) - c.incRequests(time.Since(start), methodAPEManagerRemoveChain) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return fmt.Errorf("remove chain error: %w", err) - } - - return nil -} - -// apeManagerListChains invokes sdkClient.APEManagerListChains. Returns chains and parsed response status to error. -func (c *clientWrapper) apeManagerListChains(ctx context.Context, prm PrmListAPEChains) ([]ape.Chain, error) { - cl, err := c.getClient() - if err != nil { - return nil, err - } - - cliPrm := sdkClient.PrmAPEManagerListChains{ - ChainTarget: prm.Target, - } - - start := time.Now() - res, err := cl.APEManagerListChains(ctx, cliPrm) - c.incRequests(time.Since(start), methodAPEManagerListChains) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return nil, fmt.Errorf("list chains error: %w", err) - } - - return res.Chains, nil -} - -// endpointInfo invokes sdkClient.EndpointInfo parse response status to error and return result as is. 
-func (c *clientWrapper) endpointInfo(ctx context.Context, _ prmEndpointInfo) (netmap.NodeInfo, error) { - cl, err := c.getClient() - if err != nil { - return netmap.NodeInfo{}, err - } - - return c.endpointInfoRaw(ctx, cl) -} - -func (c *clientWrapper) healthcheck(ctx context.Context) (netmap.NodeInfo, error) { - cl := c.getClientRaw() - return c.endpointInfoRaw(ctx, cl) -} - -func (c *clientWrapper) endpointInfoRaw(ctx context.Context, cl *sdkClient.Client) (netmap.NodeInfo, error) { - start := time.Now() - res, err := cl.EndpointInfo(ctx, sdkClient.PrmEndpointInfo{}) - c.incRequests(time.Since(start), methodEndpointInfo) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return netmap.NodeInfo{}, fmt.Errorf("endpoint info on client: %w", err) - } - - return res.NodeInfo(), nil -} - -// networkInfo invokes sdkClient.NetworkInfo parse response status to error and return result as is. -func (c *clientWrapper) networkInfo(ctx context.Context, _ prmNetworkInfo) (netmap.NetworkInfo, error) { - cl, err := c.getClient() - if err != nil { - return netmap.NetworkInfo{}, err - } - - start := time.Now() - res, err := cl.NetworkInfo(ctx, sdkClient.PrmNetworkInfo{}) - c.incRequests(time.Since(start), methodNetworkInfo) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return netmap.NetworkInfo{}, fmt.Errorf("network info on client: %w", err) - } - - return res.Info(), nil -} - -// networkInfo invokes sdkClient.NetworkInfo parse response status to error and return result as is. -func (c *clientWrapper) netMapSnapshot(ctx context.Context, _ prmNetMapSnapshot) (netmap.NetMap, error) { - cl, err := c.getClient() - if err != nil { - return netmap.NetMap{}, err - } - - start := time.Now() - res, err := cl.NetMapSnapshot(ctx, sdkClient.PrmNetMapSnapshot{}) - c.incRequests(time.Since(start), methodNetMapSnapshot) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return netmap.NetMap{}, fmt.Errorf("network map snapshot on client: %w", err) - } - - return res.NetMap(), nil -} - -// objectPatch patches object in FrostFS. -func (c *clientWrapper) objectPatch(ctx context.Context, prm PrmObjectPatch) (ResPatchObject, error) { - cl, err := c.getClient() - if err != nil { - return ResPatchObject{}, err - } - - start := time.Now() - pObj, err := cl.ObjectPatchInit(ctx, sdkClient.PrmObjectPatch{ - Address: prm.addr, - Session: prm.stoken, - Key: prm.key, - BearerToken: prm.btoken, - MaxChunkLength: prm.maxPayloadPatchChunkLength, - }) - if err = c.handleError(ctx, nil, err); err != nil { - return ResPatchObject{}, fmt.Errorf("init patching on API client: %w", err) - } - c.incRequests(time.Since(start), methodObjectPatch) - - start = time.Now() - attrPatchSuccess := pObj.PatchAttributes(ctx, prm.newAttrs, prm.replaceAttrs) - c.incRequests(time.Since(start), methodObjectPatch) - - if attrPatchSuccess { - start = time.Now() - _ = pObj.PatchPayload(ctx, prm.rng, prm.payload) - c.incRequests(time.Since(start), methodObjectPatch) - } - - res, err := pObj.Close(ctx) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return ResPatchObject{}, fmt.Errorf("client failure: %w", err) - } - - return ResPatchObject{ObjectID: res.ObjectID()}, nil -} - -// objectPut writes object to FrostFS. 
-func (c *clientWrapper) objectPut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { - if prm.bufferMaxSize == 0 { - prm.bufferMaxSize = defaultBufferMaxSizeForPut - } - - if prm.clientCut { - return c.objectPutClientCut(ctx, prm) - } - - return c.objectPutServerCut(ctx, prm) -} - -func (c *clientWrapper) objectPutServerCut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { - cl, err := c.getClient() - if err != nil { - return ResPutObject{}, err - } - - cliPrm := sdkClient.PrmObjectPutInit{ - CopiesNumber: prm.copiesNumber, - Session: prm.stoken, - Key: prm.key, - BearerToken: prm.btoken, - } - - start := time.Now() - wObj, err := cl.ObjectPutInit(ctx, cliPrm) - c.incRequests(time.Since(start), methodObjectPut) - if err = c.handleError(ctx, nil, err); err != nil { - return ResPutObject{}, fmt.Errorf("init writing on API client: %w", err) - } - - if wObj.WriteHeader(ctx, prm.hdr) { - sz := prm.hdr.PayloadSize() - - if data := prm.hdr.Payload(); len(data) > 0 { - if prm.payload != nil { - prm.payload = io.MultiReader(bytes.NewReader(data), prm.payload) - } else { - prm.payload = bytes.NewReader(data) - sz = uint64(len(data)) - } - } - - if prm.payload != nil { - if sz == 0 || sz > prm.bufferMaxSize { - sz = prm.bufferMaxSize - } - - buf := make([]byte, sz) - - var n int - - for { - n, err = prm.payload.Read(buf) - if n > 0 { - start = time.Now() - successWrite := wObj.WritePayloadChunk(ctx, buf[:n]) - c.incRequests(time.Since(start), methodObjectPut) - if !successWrite { - break - } - - continue - } - - if errors.Is(err, io.EOF) { - break - } - - return ResPutObject{}, fmt.Errorf("read payload: %w", c.handleError(ctx, nil, err)) - } - } - } - - res, err := wObj.Close(ctx) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { // here err already carries both status and client errors - return ResPutObject{}, fmt.Errorf("client failure: %w", err) - } - - return ResPutObject{ - ObjectID: res.StoredObjectID(), - Epoch: res.StoredEpoch(), - }, nil -} - -func (c *clientWrapper) objectPutClientCut(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { - putInitPrm := PrmObjectPutClientCutInit{ - PrmObjectPut: prm, - } - - start := time.Now() - wObj, err := c.objectPutInitTransformer(putInitPrm) - c.incRequests(time.Since(start), methodObjectPut) - if err = c.handleError(ctx, nil, err); err != nil { - return ResPutObject{}, fmt.Errorf("init writing on API client: %w", err) - } - - if wObj.WriteHeader(ctx, prm.hdr) { - sz := prm.hdr.PayloadSize() - - if data := prm.hdr.Payload(); len(data) > 0 { - if prm.payload != nil { - prm.payload = io.MultiReader(bytes.NewReader(data), prm.payload) - } else { - prm.payload = bytes.NewReader(data) - sz = uint64(len(data)) - } - } - - if prm.payload != nil { - if sz == 0 || sz > prm.bufferMaxSize { - sz = prm.bufferMaxSize - } - - buf := make([]byte, sz) - - var n int - - for { - n, err = prm.payload.Read(buf) - if n > 0 { - start = time.Now() - successWrite := wObj.WritePayloadChunk(ctx, buf[:n]) - c.incRequests(time.Since(start), methodObjectPut) - if !successWrite { - break - } - - continue - } - - if errors.Is(err, io.EOF) { - break - } - - return ResPutObject{}, fmt.Errorf("read payload: %w", c.handleError(ctx, nil, err)) - } - } - } - - res, err := wObj.Close(ctx) - var st apistatus.Status - if res != nil { - st = res.Status - } - if err = c.handleError(ctx, st, err); err != nil { // here err already carries both status and client errors - return 
ResPutObject{}, fmt.Errorf("client failure: %w", err) - } - - return ResPutObject{ - ObjectID: res.OID, - Epoch: res.Epoch, - }, nil -} - -// objectDelete invokes sdkClient.ObjectDelete parse response status to error. -func (c *clientWrapper) objectDelete(ctx context.Context, prm PrmObjectDelete) error { - cl, err := c.getClient() - if err != nil { - return err - } - - cnr := prm.addr.Container() - obj := prm.addr.Object() - - cliPrm := sdkClient.PrmObjectDelete{ - BearerToken: prm.btoken, - Session: prm.stoken, - ContainerID: &cnr, - ObjectID: &obj, - Key: prm.key, - } - - start := time.Now() - res, err := cl.ObjectDelete(ctx, cliPrm) - c.incRequests(time.Since(start), methodObjectDelete) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return fmt.Errorf("delete object on client: %w", err) - } - return nil -} - -// objectGet returns reader for object. -func (c *clientWrapper) objectGet(ctx context.Context, prm PrmObjectGet) (ResGetObject, error) { - cl, err := c.getClient() - if err != nil { - return ResGetObject{}, err - } - - prmCnr := prm.addr.Container() - prmObj := prm.addr.Object() - - cliPrm := sdkClient.PrmObjectGet{ - BearerToken: prm.btoken, - Session: prm.stoken, - ContainerID: &prmCnr, - ObjectID: &prmObj, - Key: prm.key, - } - - var res ResGetObject - - rObj, err := cl.ObjectGetInit(ctx, cliPrm) - if err = c.handleError(ctx, nil, err); err != nil { - return ResGetObject{}, fmt.Errorf("init object reading on client: %w", err) - } - - start := time.Now() - successReadHeader := rObj.ReadHeader(&res.Header) - c.incRequests(time.Since(start), methodObjectGet) - if !successReadHeader { - rObjRes, err := rObj.Close() - var st apistatus.Status - if rObjRes != nil { - st = rObjRes.Status() - } - err = c.handleError(ctx, st, err) - return res, fmt.Errorf("read header: %w", err) - } - - res.Payload = &objectReadCloser{ - reader: rObj, - elapsedTimeCallback: func(elapsed time.Duration) { - c.incRequests(elapsed, methodObjectGet) - }, - } - - return res, nil -} - -// objectHead invokes sdkClient.ObjectHead parse response status to error and return result as is. -func (c *clientWrapper) objectHead(ctx context.Context, prm PrmObjectHead) (object.Object, error) { - cl, err := c.getClient() - if err != nil { - return object.Object{}, err - } - - prmCnr := prm.addr.Container() - prmObj := prm.addr.Object() - - cliPrm := sdkClient.PrmObjectHead{ - BearerToken: prm.btoken, - Session: prm.stoken, - Raw: prm.raw, - ContainerID: &prmCnr, - ObjectID: &prmObj, - Key: prm.key, - } - - var obj object.Object - - start := time.Now() - res, err := cl.ObjectHead(ctx, cliPrm) - c.incRequests(time.Since(start), methodObjectHead) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return obj, fmt.Errorf("read object header via client: %w", err) - } - if !res.ReadHeader(&obj) { - return obj, errors.New("missing object header in response") - } - - return obj, nil -} - -// objectRange returns object range reader. 
-func (c *clientWrapper) objectRange(ctx context.Context, prm PrmObjectRange) (ResObjectRange, error) { - cl, err := c.getClient() - if err != nil { - return ResObjectRange{}, err - } - - prmCnr := prm.addr.Container() - prmObj := prm.addr.Object() - - cliPrm := sdkClient.PrmObjectRange{ - BearerToken: prm.btoken, - Session: prm.stoken, - ContainerID: &prmCnr, - ObjectID: &prmObj, - Offset: prm.off, - Length: prm.ln, - Key: prm.key, - } - - start := time.Now() - res, err := cl.ObjectRangeInit(ctx, cliPrm) - c.incRequests(time.Since(start), methodObjectRange) - if err = c.handleError(ctx, nil, err); err != nil { - return ResObjectRange{}, fmt.Errorf("init payload range reading on client: %w", err) - } - - return ResObjectRange{ - payload: res, - elapsedTimeCallback: func(elapsed time.Duration) { - c.incRequests(elapsed, methodObjectRange) - }, - }, nil -} - -// objectSearch invokes sdkClient.ObjectSearchInit parse response status to error and return result as is. -func (c *clientWrapper) objectSearch(ctx context.Context, prm PrmObjectSearch) (ResObjectSearch, error) { - cl, err := c.getClient() - if err != nil { - return ResObjectSearch{}, err - } - - cliPrm := sdkClient.PrmObjectSearch{ - ContainerID: &prm.cnrID, - Filters: prm.filters, - Session: prm.stoken, - BearerToken: prm.btoken, - Key: prm.key, - } - - res, err := cl.ObjectSearchInit(ctx, cliPrm) - if err = c.handleError(ctx, nil, err); err != nil { - return ResObjectSearch{}, fmt.Errorf("init object searching on client: %w", err) - } - - return ResObjectSearch{r: res, handleError: c.handleError}, nil -} - -// sessionCreate invokes sdkClient.SessionCreate parse response status to error and return result as is. -func (c *clientWrapper) sessionCreate(ctx context.Context, prm prmCreateSession) (resCreateSession, error) { - cl, err := c.getClient() - if err != nil { - return resCreateSession{}, err - } - - cliPrm := sdkClient.PrmSessionCreate{ - Expiration: prm.exp, - Key: &prm.key, - } - - start := time.Now() - res, err := cl.SessionCreate(ctx, cliPrm) - c.incRequests(time.Since(start), methodSessionCreate) - var st apistatus.Status - if res != nil { - st = res.Status() - } - if err = c.handleError(ctx, st, err); err != nil { - return resCreateSession{}, fmt.Errorf("session creation on client: %w", err) - } - - return resCreateSession{ - id: res.ID(), - sessionKey: res.PublicKey(), - }, nil -} - -func (c *clientStatusMonitor) isHealthy() bool { - return c.healthy.Load() == statusHealthy -} - -func (c *clientStatusMonitor) setHealthy() { - c.healthy.Store(statusHealthy) -} - -func (c *clientStatusMonitor) setUnhealthy() { - c.healthy.Store(statusUnhealthyOnRequest) -} - -func (c *clientStatusMonitor) address() string { - return c.addr -} - -func (c *clientStatusMonitor) incErrorRate() { - c.mu.Lock() - c.currentErrorCount++ - c.overallErrorCount++ - - thresholdReached := c.currentErrorCount >= c.errorThreshold - if thresholdReached { - c.setUnhealthy() - c.currentErrorCount = 0 - } - c.mu.Unlock() - - if thresholdReached { - c.log(zapcore.WarnLevel, "error threshold reached", - zap.String("address", c.addr), zap.Uint32("threshold", c.errorThreshold)) - } -} - -func (c *clientStatusMonitor) incErrorRateToUnhealthy(err error) { - c.mu.Lock() - c.currentErrorCount = 0 - c.overallErrorCount++ - c.setUnhealthy() - c.mu.Unlock() - - c.log(zapcore.WarnLevel, "explicitly mark node unhealthy", zap.String("address", c.addr), zap.Error(err)) -} - -func (c *clientStatusMonitor) log(level zapcore.Level, msg string, fields ...zap.Field) { - if 
c.logger == nil { - return - } - - c.logger.Log(level, msg, fields...) -} - -func (c *clientStatusMonitor) currentErrorRate() uint32 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.currentErrorCount -} - -func (c *clientStatusMonitor) overallErrorRate() uint64 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.overallErrorCount -} - -func (c *clientStatusMonitor) methodsStatus() []StatusSnapshot { - result := make([]StatusSnapshot, len(c.methods)) - for i, val := range c.methods { - result[i] = val.Snapshot() - } - - return result -} - -func (c *clientWrapper) incRequests(elapsed time.Duration, method MethodIndex) { - methodStat := c.methods[method] - methodStat.IncRequests(elapsed) - if c.prm.poolRequestInfoCallback != nil { - c.prm.poolRequestInfoCallback(RequestInfo{ - Address: c.prm.address, - Method: method, - Elapsed: elapsed, - }) - } -} - -func (c *clientWrapper) close() error { - if !c.isDialed() { - return nil - } - if cl := c.getClientRaw(); cl != nil { - return cl.Close() - } - return nil -} - -func (c *clientWrapper) scheduleGracefulClose() { - cl := c.getClientRaw() - if cl == nil { - return - } - - time.AfterFunc(c.prm.gracefulCloseOnSwitchTimeout, func() { - if err := cl.Close(); err != nil { - c.log(zap.DebugLevel, "close unhealthy client during rebalance", zap.String("address", c.address()), zap.Error(err)) - } - }) -} - -func (c *clientStatusMonitor) handleError(ctx context.Context, st apistatus.Status, err error) error { - if stErr := apistatus.ErrFromStatus(st); stErr != nil { - switch stErr.(type) { - case *apistatus.ServerInternal, - *apistatus.WrongMagicNumber, - *apistatus.SignatureVerification: - c.incErrorRate() - case *apistatus.NodeUnderMaintenance: - c.incErrorRateToUnhealthy(stErr) - } - - if err == nil { - err = stErr - } - - return err - } - - if err != nil { - if needCountError(ctx, err) { - if sdkClient.IsErrNodeUnderMaintenance(err) { - c.incErrorRateToUnhealthy(err) - } else { - c.incErrorRate() - } - } - - return err - } - - return nil -} - -func needCountError(ctx context.Context, err error) bool { - // non-status logic error that could be returned - // from the SDK client; should not be considered - // as a connection error - var siErr *object.SplitInfoError - if errors.As(err, &siErr) { - return false - } - var eiErr *object.ECInfoError - if errors.As(err, &eiErr) { - return false - } - - if ctx != nil && errors.Is(ctx.Err(), context.Canceled) { - return false - } - - return true -} - -// clientBuilder is a type alias of client constructors which open connection -// to the given endpoint. -type clientBuilder = func(endpoint string) client - -// RequestInfo groups info about pool request. -type RequestInfo struct { - Address string - Method MethodIndex - Elapsed time.Duration -} - // InitParameters contains values used to initialize connection Pool. type InitParameters struct { key *ecdsa.PrivateKey @@ -2006,25 +744,15 @@ type resCreateSession struct { // // See pool package overview to get some examples. 
type Pool struct { - innerPools []*innerPool - key *ecdsa.PrivateKey - cancel context.CancelFunc - closedCh chan struct{} - cache *sessionCache - stokenDuration uint64 - rebalanceParams rebalanceParameters - clientBuilder clientBuilder - logger *zap.Logger + manager *connectionManager + logger *zap.Logger + key *ecdsa.PrivateKey + cache *sessionCache + stokenDuration uint64 maxObjectSize uint64 } -type innerPool struct { - lock sync.RWMutex - sampler *sampler - clients []client -} - const ( defaultSessionTokenExpirationDuration = 100 // in epochs defaultErrorThreshold = 100 @@ -2038,17 +766,10 @@ const ( defaultBufferMaxSizeForPut = 3 * 1024 * 1024 // 3 MB ) -// NewPool creates connection pool using parameters. +// NewPool returns an instance of Pool configured according to the parameters. +// +// Before using Pool, you MUST call Dial. func NewPool(options InitParameters) (*Pool, error) { - if options.key == nil { - return nil, fmt.Errorf("missed required parameter 'Key'") - } - - nodesParams, err := adjustNodeParams(options.nodeParams) - if err != nil { - return nil, err - } - cache, err := newCache(options.sessionExpirationDuration) if err != nil { return nil, fmt.Errorf("couldn't create cache: %w", err) @@ -2056,18 +777,17 @@ func NewPool(options InitParameters) (*Pool, error) { fillDefaultInitParams(&options, cache) + manager, err := newConnectionManager(options) + if err != nil { + return nil, err + } + pool := &Pool{ - key: options.key, cache: cache, + key: options.key, logger: options.logger, + manager: manager, stokenDuration: options.sessionExpirationDuration, - rebalanceParams: rebalanceParameters{ - nodesParams: nodesParams, - nodeRequestTimeout: options.healthcheckTimeout, - clientRebalanceInterval: options.clientRebalanceInterval, - sessionExpirationDuration: options.sessionExpirationDuration, - }, - clientBuilder: options.clientBuilder, } return pool, nil @@ -2082,66 +802,39 @@ func NewPool(options InitParameters) (*Pool, error) { // // See also InitParameters.SetClientRebalanceInterval. 
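// Illustrative sketch, not part of this patch: typical Pool construction and
// dialing after this change. Only NewPool and Dial are shown by the diff above;
// the InitParameters setters (SetKey, AddNode) and the NewNodeParam helper are
// assumed to exist as in earlier SDK versions.
func examplePoolDial(ctx context.Context, key *ecdsa.PrivateKey) (*Pool, error) {
	var prm InitParameters
	prm.SetKey(key)                                      // assumed setter
	prm.AddNode(NewNodeParam(1, "grpc://node1:8080", 1)) // assumed helpers

	p, err := NewPool(prm)
	if err != nil {
		return nil, err
	}

	// Dial establishes connections, creates session tokens and fetches the
	// network's max object size; the Pool must not be used before it succeeds.
	if err := p.Dial(ctx); err != nil {
		return nil, err
	}

	return p, nil
}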
func (p *Pool) Dial(ctx context.Context) error { - inner := make([]*innerPool, len(p.rebalanceParams.nodesParams)) - var atLeastOneHealthy bool - - for i, params := range p.rebalanceParams.nodesParams { - clients := make([]client, len(params.weights)) - for j, addr := range params.addresses { - clients[j] = p.clientBuilder(addr) - if err := clients[j].dial(ctx); err != nil { - p.log(zap.WarnLevel, "failed to build client", zap.String("address", addr), zap.Error(err)) - continue - } - - var st session.Object - err := initSessionForDuration(ctx, &st, clients[j], p.rebalanceParams.sessionExpirationDuration, *p.key, false) - if err != nil { - clients[j].setUnhealthy() - p.log(zap.WarnLevel, "failed to create frostfs session token for client", - zap.String("address", addr), zap.Error(err)) - continue - } - - _ = p.cache.Put(formCacheKey(addr, p.key, false), st) - atLeastOneHealthy = true - } - source := rand.NewSource(time.Now().UnixNano()) - sampl := newSampler(params.weights, source) - - inner[i] = &innerPool{ - sampler: sampl, - clients: clients, - } + err := p.manager.dial(ctx) + if err != nil { + return err } + var atLeastOneHealthy bool + p.manager.iterate(func(cl client) { + var st session.Object + err := initSessionForDuration(ctx, &st, cl, p.manager.rebalanceParams.sessionExpirationDuration, *p.key, false) + if err != nil { + if p.logger != nil { + p.logger.Log(zap.WarnLevel, "failed to create frostfs session token for client", + zap.String("address", cl.address()), zap.Error(err)) + } + return + } + + _ = p.cache.Put(formCacheKey(cl.address(), p.key, false), st) + atLeastOneHealthy = true + }) + if !atLeastOneHealthy { return fmt.Errorf("at least one node must be healthy") } - ctx, cancel := context.WithCancel(ctx) - p.cancel = cancel - p.closedCh = make(chan struct{}) - p.innerPools = inner - ni, err := p.NetworkInfo(ctx) if err != nil { return fmt.Errorf("get network info for max object size: %w", err) } p.maxObjectSize = ni.MaxObjectSize() - - go p.startRebalance(ctx) return nil } -func (p *Pool) log(level zapcore.Level, msg string, fields ...zap.Field) { - if p.logger == nil { - return - } - - p.logger.Log(level, msg, fields...) -} - func fillDefaultInitParams(params *InitParameters, cache *sessionCache) { if params.sessionExpirationDuration == 0 { params.sessionExpirationDuration = defaultSessionTokenExpirationDuration @@ -2196,204 +889,6 @@ func fillDefaultInitParams(params *InitParameters, cache *sessionCache) { } } -func adjustNodeParams(nodeParams []NodeParam) ([]*nodesParam, error) { - if len(nodeParams) == 0 { - return nil, errors.New("no FrostFS peers configured") - } - - nodesParamsMap := make(map[int]*nodesParam) - for _, param := range nodeParams { - nodes, ok := nodesParamsMap[param.priority] - if !ok { - nodes = &nodesParam{priority: param.priority} - } - nodes.addresses = append(nodes.addresses, param.address) - nodes.weights = append(nodes.weights, param.weight) - nodesParamsMap[param.priority] = nodes - } - - nodesParams := make([]*nodesParam, 0, len(nodesParamsMap)) - for _, nodes := range nodesParamsMap { - nodes.weights = adjustWeights(nodes.weights) - nodesParams = append(nodesParams, nodes) - } - - sort.Slice(nodesParams, func(i, j int) bool { - return nodesParams[i].priority < nodesParams[j].priority - }) - - return nodesParams, nil -} - -// startRebalance runs loop to monitor connection healthy status. 
-func (p *Pool) startRebalance(ctx context.Context) { - ticker := time.NewTicker(p.rebalanceParams.clientRebalanceInterval) - defer ticker.Stop() - - buffers := make([][]float64, len(p.rebalanceParams.nodesParams)) - for i, params := range p.rebalanceParams.nodesParams { - buffers[i] = make([]float64, len(params.weights)) - } - - for { - select { - case <-ctx.Done(): - close(p.closedCh) - return - case <-ticker.C: - p.updateNodesHealth(ctx, buffers) - ticker.Reset(p.rebalanceParams.clientRebalanceInterval) - } - } -} - -func (p *Pool) updateNodesHealth(ctx context.Context, buffers [][]float64) { - wg := sync.WaitGroup{} - for i, inner := range p.innerPools { - wg.Add(1) - - bufferWeights := buffers[i] - go func(i int, _ *innerPool) { - defer wg.Done() - p.updateInnerNodesHealth(ctx, i, bufferWeights) - }(i, inner) - } - wg.Wait() -} - -func (p *Pool) updateInnerNodesHealth(ctx context.Context, i int, bufferWeights []float64) { - if i > len(p.innerPools)-1 { - return - } - pool := p.innerPools[i] - options := p.rebalanceParams - - healthyChanged := new(atomic.Bool) - wg := sync.WaitGroup{} - - for j, cli := range pool.clients { - wg.Add(1) - go func(j int, cli client) { - defer wg.Done() - - tctx, c := context.WithTimeout(ctx, options.nodeRequestTimeout) - defer c() - - changed, err := restartIfUnhealthy(tctx, cli) - healthy := err == nil - if healthy { - bufferWeights[j] = options.nodesParams[i].weights[j] - } else { - bufferWeights[j] = 0 - p.cache.DeleteByPrefix(cli.address()) - } - - if changed { - fields := []zap.Field{zap.String("address", cli.address()), zap.Bool("healthy", healthy)} - if err != nil { - fields = append(fields, zap.String("reason", err.Error())) - } - - p.log(zap.DebugLevel, "health has changed", fields...) - healthyChanged.Store(true) - } - }(j, cli) - } - wg.Wait() - - if healthyChanged.Load() { - probabilities := adjustWeights(bufferWeights) - source := rand.NewSource(time.Now().UnixNano()) - pool.lock.Lock() - pool.sampler = newSampler(probabilities, source) - pool.lock.Unlock() - } -} - -// restartIfUnhealthy checks healthy status of client and recreate it if status is unhealthy. -// Indicating if status was changed by this function call and returns error that caused unhealthy status. 
-func restartIfUnhealthy(ctx context.Context, c client) (changed bool, err error) { - defer func() { - if err != nil { - c.setUnhealthy() - } else { - c.setHealthy() - } - }() - - wasHealthy := c.isHealthy() - - if res, err := c.healthcheck(ctx); err == nil { - if res.Status().IsMaintenance() { - return wasHealthy, new(apistatus.NodeUnderMaintenance) - } - - return !wasHealthy, nil - } - - if err = c.restart(ctx); err != nil { - return wasHealthy, err - } - - res, err := c.healthcheck(ctx) - if err != nil { - return wasHealthy, err - } - - if res.Status().IsMaintenance() { - return wasHealthy, new(apistatus.NodeUnderMaintenance) - } - - return !wasHealthy, nil -} - -func adjustWeights(weights []float64) []float64 { - adjusted := make([]float64, len(weights)) - sum := 0.0 - for _, weight := range weights { - sum += weight - } - if sum > 0 { - for i, weight := range weights { - adjusted[i] = weight / sum - } - } - - return adjusted -} - -func (p *Pool) connection() (client, error) { - for _, inner := range p.innerPools { - cp, err := inner.connection() - if err == nil { - return cp, nil - } - } - - return nil, errors.New("no healthy client") -} - -func (p *innerPool) connection() (client, error) { - p.lock.RLock() // need lock because of using p.sampler - defer p.lock.RUnlock() - if len(p.clients) == 1 { - cp := p.clients[0] - if cp.isHealthy() { - return cp, nil - } - return nil, errors.New("no healthy client") - } - attempts := 3 * len(p.clients) - for range attempts { - i := p.sampler.Next() - if cp := p.clients[i]; cp.isHealthy() { - return cp, nil - } - } - - return nil, errors.New("no healthy client") -} - func formCacheKey(address string, key *ecdsa.PrivateKey, clientCut bool) string { k := keys.PrivateKey{PrivateKey: *key} @@ -2484,32 +979,33 @@ type callContext struct { sessionClientCut bool } -func (p *Pool) initCallContext(ctx *callContext, cfg prmCommon, prmCtx prmContext) error { - cp, err := p.connection() +func (p *Pool) initCall(ctxCall *callContext, cfg prmCommon, prmCtx prmContext) error { + p.fillAppropriateKey(&cfg) + cp, err := p.manager.connection() if err != nil { return err } - ctx.key = cfg.key - if ctx.key == nil { + ctxCall.key = cfg.key + if ctxCall.key == nil { // use pool key if caller didn't specify its own - ctx.key = p.key + ctxCall.key = p.key } - ctx.endpoint = cp.address() - ctx.client = cp + ctxCall.endpoint = cp.address() + ctxCall.client = cp - if ctx.sessionTarget != nil && cfg.stoken != nil { - ctx.sessionTarget(*cfg.stoken) + if ctxCall.sessionTarget != nil && cfg.stoken != nil { + ctxCall.sessionTarget(*cfg.stoken) } // note that we don't override session provided by the caller - ctx.sessionDefault = cfg.stoken == nil && prmCtx.defaultSession - if ctx.sessionDefault { - ctx.sessionVerb = prmCtx.verb - ctx.sessionCnr = prmCtx.cnr - ctx.sessionObjSet = prmCtx.objSet - ctx.sessionObjs = prmCtx.objs + ctxCall.sessionDefault = cfg.stoken == nil && prmCtx.defaultSession + if ctxCall.sessionDefault { + ctxCall.sessionVerb = prmCtx.verb + ctxCall.sessionCnr = prmCtx.cnr + ctxCall.sessionObjSet = prmCtx.objSet + ctxCall.sessionObjs = prmCtx.objs } return err @@ -2586,18 +1082,14 @@ type ResPatchObject struct { } // PatchObject patches an object through a remote server using FrostFS API protocol. -// -// Main return value MUST NOT be processed on an erroneous return. 
func (p *Pool) PatchObject(ctx context.Context, prm PrmObjectPatch) (ResPatchObject, error) { var prmCtx prmContext prmCtx.useDefaultSession() prmCtx.useVerb(session.VerbObjectPatch) prmCtx.useContainer(prm.addr.Container()) - p.fillAppropriateKey(&prm.prmCommon) - var ctxCall callContext - if err := p.initCallContext(&ctxCall, prm.prmCommon, prmCtx); err != nil { + if err := p.initCall(&ctxCall, prm.prmCommon, prmCtx); err != nil { return ResPatchObject{}, fmt.Errorf("init call context: %w", err) } @@ -2618,9 +1110,13 @@ func (p *Pool) PatchObject(ctx context.Context, prm PrmObjectPatch) (ResPatchObj return res, nil } +// LatestReceivedEpoch returns the epoch number extracted from the metadata +// of responses received from the client pool's most recent request. +func (p *Pool) LatestReceivedEpoch() uint64 { + return p.cache.Epoch() +} + // PutObject writes an object through a remote server using FrostFS API protocol. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) PutObject(ctx context.Context, prm PrmObjectPut) (ResPutObject, error) { cnr, _ := prm.hdr.ContainerID() @@ -2629,11 +1125,9 @@ func (p *Pool) PutObject(ctx context.Context, prm PrmObjectPut) (ResPutObject, e prmCtx.useVerb(session.VerbObjectPut) prmCtx.useContainer(cnr) - p.fillAppropriateKey(&prm.prmCommon) - var ctxCall callContext ctxCall.sessionClientCut = prm.clientCut - if err := p.initCallContext(&ctxCall, prm.prmCommon, prmCtx); err != nil { + if err := p.initCall(&ctxCall, prm.prmCommon, prmCtx); err != nil { return ResPutObject{}, fmt.Errorf("init call context: %w", err) } @@ -2686,12 +1180,10 @@ func (p *Pool) DeleteObject(ctx context.Context, prm PrmObjectDelete) error { } } - p.fillAppropriateKey(&prm.prmCommon) - var cc callContext cc.sessionTarget = prm.UseSession - err := p.initCallContext(&cc, prm.prmCommon, prmCtx) + err := p.initCall(&cc, prm.prmCommon, prmCtx) if err != nil { return err } @@ -2732,17 +1224,13 @@ type ResGetObject struct { } // GetObject reads object header and initiates reading an object payload through a remote server using FrostFS API protocol. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) GetObject(ctx context.Context, prm PrmObjectGet) (ResGetObject, error) { - p.fillAppropriateKey(&prm.prmCommon) - var cc callContext cc.sessionTarget = prm.UseSession var res ResGetObject - err := p.initCallContext(&cc, prm.prmCommon, prmContext{}) + err := p.initCall(&cc, prm.prmCommon, prmContext{}) if err != nil { return res, err } @@ -2757,17 +1245,13 @@ func (p *Pool) GetObject(ctx context.Context, prm PrmObjectGet) (ResGetObject, e } // HeadObject reads object header through a remote server using FrostFS API protocol. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) HeadObject(ctx context.Context, prm PrmObjectHead) (object.Object, error) { - p.fillAppropriateKey(&prm.prmCommon) - var cc callContext cc.sessionTarget = prm.UseSession var obj object.Object - err := p.initCallContext(&cc, prm.prmCommon, prmContext{}) + err := p.initCall(&cc, prm.prmCommon, prmContext{}) if err != nil { return obj, err } @@ -2808,17 +1292,13 @@ func (x *ResObjectRange) Close() error { // ObjectRange initiates reading an object's payload range through a remote // server using FrostFS API protocol. -// -// Main return value MUST NOT be processed on an erroneous return. 
func (p *Pool) ObjectRange(ctx context.Context, prm PrmObjectRange) (ResObjectRange, error) { - p.fillAppropriateKey(&prm.prmCommon) - var cc callContext cc.sessionTarget = prm.UseSession var res ResObjectRange - err := p.initCallContext(&cc, prm.prmCommon, prmContext{}) + err := p.initCall(&cc, prm.prmCommon, prmContext{}) if err != nil { return res, err } @@ -2879,17 +1359,13 @@ func (x *ResObjectSearch) Close() { // // The call only opens the transmission channel, explicit fetching of matched objects // is done using the ResObjectSearch. Resulting reader must be finally closed. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) SearchObjects(ctx context.Context, prm PrmObjectSearch) (ResObjectSearch, error) { - p.fillAppropriateKey(&prm.prmCommon) - var cc callContext cc.sessionTarget = prm.UseSession var res ResObjectSearch - err := p.initCallContext(&cc, prm.prmCommon, prmContext{}) + err := p.initCall(&cc, prm.prmCommon, prmContext{}) if err != nil { return res, err } @@ -2911,10 +1387,8 @@ func (p *Pool) SearchObjects(ctx context.Context, prm PrmObjectSearch) (ResObjec // waiting timeout: 120s // // Success can be verified by reading by identifier (see GetContainer). -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) PutContainer(ctx context.Context, prm PrmContainerPut) (cid.ID, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return cid.ID{}, err } @@ -2928,10 +1402,8 @@ func (p *Pool) PutContainer(ctx context.Context, prm PrmContainerPut) (cid.ID, e } // GetContainer reads FrostFS container by ID. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) GetContainer(ctx context.Context, prm PrmContainerGet) (container.Container, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return container.Container{}, err } @@ -2946,7 +1418,7 @@ func (p *Pool) GetContainer(ctx context.Context, prm PrmContainerGet) (container // ListContainers requests identifiers of the account-owned containers. func (p *Pool) ListContainers(ctx context.Context, prm PrmContainerList) ([]cid.ID, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return nil, err } @@ -2962,7 +1434,7 @@ func (p *Pool) ListContainers(ctx context.Context, prm PrmContainerList) ([]cid. // ListContainersStream requests identifiers of the account-owned containers. func (p *Pool) ListContainersStream(ctx context.Context, prm PrmListStream) (ResListStream, error) { var res ResListStream - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return res, err } @@ -2984,7 +1456,7 @@ func (p *Pool) ListContainersStream(ctx context.Context, prm PrmListStream) (Res // // Success can be verified by reading by identifier (see GetContainer). func (p *Pool) DeleteContainer(ctx context.Context, prm PrmContainerDelete) error { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return err } @@ -2999,7 +1471,7 @@ func (p *Pool) DeleteContainer(ctx context.Context, prm PrmContainerDelete) erro // AddAPEChain sends a request to set APE chain rules for a target (basically, for a container). 
func (p *Pool) AddAPEChain(ctx context.Context, prm PrmAddAPEChain) error { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return err } @@ -3014,7 +1486,7 @@ func (p *Pool) AddAPEChain(ctx context.Context, prm PrmAddAPEChain) error { // RemoveAPEChain sends a request to remove APE chain rules for a target. func (p *Pool) RemoveAPEChain(ctx context.Context, prm PrmRemoveAPEChain) error { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return err } @@ -3029,7 +1501,7 @@ func (p *Pool) RemoveAPEChain(ctx context.Context, prm PrmRemoveAPEChain) error // ListAPEChains sends a request to list APE chains rules for a target. func (p *Pool) ListAPEChains(ctx context.Context, prm PrmListAPEChains) ([]ape.Chain, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return nil, err } @@ -3043,10 +1515,8 @@ func (p *Pool) ListAPEChains(ctx context.Context, prm PrmListAPEChains) ([]ape.C } // Balance requests current balance of the FrostFS account. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) Balance(ctx context.Context, prm PrmBalanceGet) (accounting.Decimal, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return accounting.Decimal{}, err } @@ -3061,30 +1531,7 @@ func (p *Pool) Balance(ctx context.Context, prm PrmBalanceGet) (accounting.Decim // Statistic returns connection statistics. func (p Pool) Statistic() Statistic { - stat := Statistic{} - for _, inner := range p.innerPools { - nodes := make([]string, 0, len(inner.clients)) - inner.lock.RLock() - for _, cl := range inner.clients { - if cl.isHealthy() { - nodes = append(nodes, cl.address()) - } - node := NodeStatistic{ - address: cl.address(), - methods: cl.methodsStatus(), - overallErrors: cl.overallErrorRate(), - currentErrors: cl.currentErrorRate(), - } - stat.nodes = append(stat.nodes, node) - stat.overallErrors += node.overallErrors - } - inner.lock.RUnlock() - if len(stat.currentNodes) == 0 { - stat.currentNodes = nodes - } - } - - return stat + return p.manager.Statistic() } // waitForContainerPresence waits until the container is found on the FrostFS network. @@ -3127,10 +1574,8 @@ func waitFor(ctx context.Context, params *WaitParams, condition func(context.Con } // NetworkInfo requests information about the FrostFS network of which the remote server is a part. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) NetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return netmap.NetworkInfo{}, err } @@ -3144,10 +1589,8 @@ func (p *Pool) NetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) { } // NetMapSnapshot requests information about the FrostFS network map. -// -// Main return value MUST NOT be processed on an erroneous return. func (p *Pool) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) { - cp, err := p.connection() + cp, err := p.manager.connection() if err != nil { return netmap.NetMap{}, err } @@ -3162,15 +1605,7 @@ func (p *Pool) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) { // Close closes the Pool and releases all the associated resources. 
func (p *Pool) Close() { - p.cancel() - <-p.closedCh - - // close all clients - for _, pools := range p.innerPools { - for _, cli := range pools.clients { - _ = cli.close() - } - } + p.manager.close() } // SyncContainerWithNetwork applies network configuration received via @@ -3217,7 +1652,7 @@ func (p *Pool) GetSplitInfo(ctx context.Context, cnrID cid.ID, objID oid.ID, tok case errors.As(err, &errSplit): return errSplit.SplitInfo(), nil case err == nil || errors.As(err, &errECInfo): - return nil, relations.ErrNoSplitInfo + return nil, fmt.Errorf("failed to get raw object header %w", relations.ErrNoSplitInfo) default: return nil, fmt.Errorf("failed to get raw object header: %w", err) } diff --git a/pool/pool_test.go b/pool/pool_test.go index 1362654b..e6af6643 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -104,7 +104,7 @@ func TestBuildPoolOneNodeFailed(t *testing.T) { expectedAuthKey := frostfsecdsa.PublicKey(clientKeys[1].PublicKey) condition := func() bool { - cp, err := clientPool.connection() + cp, err := clientPool.manager.connection() if err != nil { return false } @@ -141,7 +141,7 @@ func TestOneNode(t *testing.T) { require.NoError(t, err) t.Cleanup(pool.Close) - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) expectedAuthKey := frostfsecdsa.PublicKey(key1.PublicKey) @@ -171,7 +171,7 @@ func TestTwoNodes(t *testing.T) { require.NoError(t, err) t.Cleanup(pool.Close) - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.True(t, assertAuthKeyForAny(st, clientKeys)) @@ -220,13 +220,12 @@ func TestOneOfTwoFailed(t *testing.T) { err = pool.Dial(context.Background()) require.NoError(t, err) - require.NoError(t, err) t.Cleanup(pool.Close) time.Sleep(2 * time.Second) for range 5 { - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.True(t, assertAuthKeyForAny(st, clientKeys)) @@ -369,11 +368,11 @@ func TestUpdateNodesHealth(t *testing.T) { tc.prepareCli(cli) p, log := newPool(t, cli) - p.updateNodesHealth(ctx, [][]float64{{1}}) + p.manager.updateNodesHealth(ctx, [][]float64{{1}}) changed := tc.wasHealthy != tc.willHealthy require.Equalf(t, tc.willHealthy, cli.isHealthy(), "healthy status should be: %v", tc.willHealthy) - require.Equalf(t, changed, 1 == log.Len(), "healthy status should be changed: %v", changed) + require.Equalf(t, changed, log.Len() == 1, "healthy status should be changed: %v", changed) }) } } @@ -385,19 +384,19 @@ func newPool(t *testing.T, cli *mockClient) (*Pool, *observer.ObservedLogs) { require.NoError(t, err) return &Pool{ - innerPools: []*innerPool{{ - sampler: newSampler([]float64{1}, rand.NewSource(0)), - clients: []client{cli}, - }}, - cache: cache, - key: newPrivateKey(t), - closedCh: make(chan struct{}), - rebalanceParams: rebalanceParameters{ - nodesParams: []*nodesParam{{1, []string{"peer0"}, []float64{1}}}, - nodeRequestTimeout: time.Second, - clientRebalanceInterval: 200 * time.Millisecond, - }, - logger: log, + cache: cache, + key: newPrivateKey(t), + manager: &connectionManager{ + innerPools: []*innerPool{{ + sampler: newSampler([]float64{1}, rand.NewSource(0)), + clients: []client{cli}, + }}, + healthChecker: newHealthCheck(200 * time.Millisecond), + rebalanceParams: rebalanceParameters{ + 
nodesParams: []*nodesParam{{1, []string{"peer0"}, []float64{1}}}, + nodeRequestTimeout: time.Second, + }, + logger: log}, }, observedLog } @@ -435,7 +434,7 @@ func TestTwoFailed(t *testing.T) { time.Sleep(2 * time.Second) - _, err = pool.connection() + _, err = pool.manager.connection() require.Error(t, err) require.Contains(t, err.Error(), "no healthy") } @@ -469,7 +468,7 @@ func TestSessionCache(t *testing.T) { t.Cleanup(pool.Close) // cache must contain session token - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.True(t, st.AssertAuthKey(&expectedAuthKey)) @@ -482,7 +481,7 @@ func TestSessionCache(t *testing.T) { require.Error(t, err) // cache must not contain session token - cp, err = pool.connection() + cp, err = pool.manager.connection() require.NoError(t, err) _, ok := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.False(t, ok) @@ -494,7 +493,7 @@ func TestSessionCache(t *testing.T) { require.NoError(t, err) // cache must contain session token - cp, err = pool.connection() + cp, err = pool.manager.connection() require.NoError(t, err) st, _ = pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.True(t, st.AssertAuthKey(&expectedAuthKey)) @@ -538,7 +537,7 @@ func TestPriority(t *testing.T) { expectedAuthKey1 := frostfsecdsa.PublicKey(clientKeys[0].PublicKey) firstNode := func() bool { - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) return st.AssertAuthKey(&expectedAuthKey1) @@ -546,7 +545,7 @@ func TestPriority(t *testing.T) { expectedAuthKey2 := frostfsecdsa.PublicKey(clientKeys[1].PublicKey) secondNode := func() bool { - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) return st.AssertAuthKey(&expectedAuthKey2) @@ -583,7 +582,7 @@ func TestSessionCacheWithKey(t *testing.T) { require.NoError(t, err) // cache must contain session token - cp, err := pool.connection() + cp, err := pool.manager.connection() require.NoError(t, err) st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false)) require.True(t, st.AssertAuthKey(&expectedAuthKey)) @@ -636,9 +635,8 @@ func TestSessionTokenOwner(t *testing.T) { cc.sessionTarget = func(tok session.Object) { tkn = tok } - err = p.initCallContext(&cc, prm, prmCtx) + err = p.initCall(&cc, prm, prmCtx) require.NoError(t, err) - err = p.openDefaultSession(ctx, &cc) require.NoError(t, err) require.True(t, tkn.VerifySignature()) @@ -922,14 +920,14 @@ func TestSwitchAfterErrorThreshold(t *testing.T) { t.Cleanup(pool.Close) for range errorThreshold { - conn, err := pool.connection() + conn, err := pool.manager.connection() require.NoError(t, err) require.Equal(t, nodes[0].address, conn.address()) _, err = conn.objectGet(ctx, PrmObjectGet{}) require.Error(t, err) } - conn, err := pool.connection() + conn, err := pool.manager.connection() require.NoError(t, err) require.Equal(t, nodes[1].address, conn.address()) _, err = conn.objectGet(ctx, PrmObjectGet{}) diff --git a/pool/sampler_test.go b/pool/sampler_test.go index ab06e0f4..b0860b1f 100644 --- a/pool/sampler_test.go +++ b/pool/sampler_test.go @@ -47,9 +47,6 @@ func TestHealthyReweight(t *testing.T) { buffer = make([]float64, len(weights)) ) - cache, err := newCache(0) - require.NoError(t, err) - client1 := 
newMockClient(names[0], *newPrivateKey(t)) client1.errOnDial() @@ -59,22 +56,20 @@ func TestHealthyReweight(t *testing.T) { sampler: newSampler(weights, rand.NewSource(0)), clients: []client{client1, client2}, } - p := &Pool{ + cm := &connectionManager{ innerPools: []*innerPool{inner}, - cache: cache, - key: newPrivateKey(t), rebalanceParams: rebalanceParameters{nodesParams: []*nodesParam{{weights: weights}}}, } // check getting first node connection before rebalance happened - connection0, err := p.connection() + connection0, err := cm.connection() require.NoError(t, err) mock0 := connection0.(*mockClient) require.Equal(t, names[0], mock0.address()) - p.updateInnerNodesHealth(context.TODO(), 0, buffer) + cm.updateInnerNodesHealth(context.TODO(), 0, buffer) - connection1, err := p.connection() + connection1, err := cm.connection() require.NoError(t, err) mock1 := connection1.(*mockClient) require.Equal(t, names[1], mock1.address()) @@ -84,10 +79,10 @@ func TestHealthyReweight(t *testing.T) { inner.clients[0] = newMockClient(names[0], *newPrivateKey(t)) inner.lock.Unlock() - p.updateInnerNodesHealth(context.TODO(), 0, buffer) + cm.updateInnerNodesHealth(context.TODO(), 0, buffer) inner.sampler = newSampler(weights, rand.NewSource(0)) - connection0, err = p.connection() + connection0, err = cm.connection() require.NoError(t, err) mock0 = connection0.(*mockClient) require.Equal(t, names[0], mock0.address()) @@ -108,12 +103,12 @@ func TestHealthyNoReweight(t *testing.T) { newMockClient(names[1], *newPrivateKey(t)), }, } - p := &Pool{ + cm := &connectionManager{ innerPools: []*innerPool{inner}, rebalanceParams: rebalanceParameters{nodesParams: []*nodesParam{{weights: weights}}}, } - p.updateInnerNodesHealth(context.TODO(), 0, buffer) + cm.updateInnerNodesHealth(context.TODO(), 0, buffer) inner.lock.RLock() defer inner.lock.RUnlock() diff --git a/pool/statistic.go b/pool/statistic.go index 40da88ff..b9c24303 100644 --- a/pool/statistic.go +++ b/pool/statistic.go @@ -97,6 +97,11 @@ func (n NodeStatistic) AverageListContainer() time.Duration { return n.averageTime(methodContainerList) } +// AverageListContainerStream returns average time to perform ContainerListStream request. +func (n NodeStatistic) AverageListContainerStream() time.Duration { + return n.averageTime(methodContainerListStream) +} + // AverageDeleteContainer returns average time to perform ContainerDelete request. 
func (n NodeStatistic) AverageDeleteContainer() time.Duration { return n.averageTime(methodContainerDelete) diff --git a/pool/tree/circuitbreaker.go b/pool/tree/circuitbreaker.go new file mode 100644 index 00000000..82615a66 --- /dev/null +++ b/pool/tree/circuitbreaker.go @@ -0,0 +1,82 @@ +package tree + +import ( + "errors" + "sync" + "time" +) + +type ( + circuitBreaker struct { + breakDuration time.Duration + threshold int + + mu sync.RWMutex + state map[uint64]state + } + + state struct { + counter int + breakTimestamp time.Time + } +) + +var ErrCBClosed = errors.New("circuit breaker is closed") + +func newCircuitBreaker(breakDuration time.Duration, threshold int) *circuitBreaker { + return &circuitBreaker{ + breakDuration: breakDuration, + threshold: threshold, + state: make(map[uint64]state), + } +} + +func (cb *circuitBreaker) checkBreak(id uint64) error { + cb.mu.RLock() + s, ok := cb.state[id] + cb.mu.RUnlock() + + if ok && time.Since(s.breakTimestamp) < cb.breakDuration { + return ErrCBClosed + } + + return nil +} + +func (cb *circuitBreaker) openBreak(id uint64) { + cb.mu.Lock() + defer cb.mu.Unlock() + delete(cb.state, id) +} + +func (cb *circuitBreaker) incError(id uint64) { + cb.mu.Lock() + defer cb.mu.Unlock() + + s := cb.state[id] + + s.counter++ + if s.counter >= cb.threshold { + s.counter = cb.threshold + if time.Since(s.breakTimestamp) >= cb.breakDuration { + s.breakTimestamp = time.Now() + } + } + + cb.state[id] = s +} + +func (cb *circuitBreaker) Do(id uint64, f func() error) error { + if err := cb.checkBreak(id); err != nil { + return err + } + + err := f() + if err == nil { + cb.openBreak(id) + } else { + cb.incError(id) + } + + return err +} diff --git a/pool/tree/circuitbreaker_test.go b/pool/tree/circuitbreaker_test.go new file mode 100644 index 00000000..c616d1b6 --- /dev/null +++ b/pool/tree/circuitbreaker_test.go @@ -0,0 +1,68 @@ +package tree + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestCircuitBreaker(t *testing.T) { + remoteErr := errors.New("service is being synchronized") + breakDuration := 1 * time.Second + threshold := 10 + cb := newCircuitBreaker(breakDuration, threshold) + + // Hit threshold + for i := 0; i < threshold; i++ { + err := cb.Do(1, func() error { return remoteErr }) + require.ErrorIs(t, err, remoteErr) + } + + // Different client should not be affected by threshold + require.NoError(t, cb.Do(2, func() error { return nil })) + + // Immediate request should return circuit breaker error + require.ErrorIs(t, cb.Do(1, func() error { return nil }), ErrCBClosed) + + // Request after breakDuration should be ok + time.Sleep(breakDuration) + require.NoError(t, cb.Do(1, func() error { return nil })) + + // Try hitting threshold one more time after break duration + for i := 0; i < threshold; i++ { + err := cb.Do(1, func() error { return remoteErr }) + require.ErrorIs(t, err, remoteErr) + } + + // Immediate request should return circuit breaker error + require.ErrorIs(t, cb.Do(1, func() error { return nil }), ErrCBClosed) +} + +func TestCircuitBreakerNoBlock(t *testing.T) { + remoteErr := errors.New("service is being synchronized") + funcDuration := 200 * time.Millisecond + threshold := 100 + cb := newCircuitBreaker(10*funcDuration, threshold) + + slowFunc := func() error { + time.Sleep(funcDuration) + return remoteErr + } + + for i := 0; i < threshold; i++ { + // run in multiple goroutines Do function + go func() { + cb.Do(1, slowFunc) + }() + } + + time.Sleep(funcDuration) + + // eventually at 
most after one more func duration circuit breaker will be + // closed and not blocked by slow func execution under mutex + require.Eventually(t, func() bool { + return errors.Is(cb.Do(1, func() error { return nil }), ErrCBClosed) + }, funcDuration, funcDuration/10) +} diff --git a/pool/tree/client.go b/pool/tree/client.go index b93b5e63..b7682d9b 100644 --- a/pool/tree/client.go +++ b/pool/tree/client.go @@ -49,11 +49,11 @@ func (c *treeClient) dial(ctx context.Context) error { var err error c.client, err = c.createClient() if err != nil { - return err + return fmt.Errorf("couldn't dial '%s': %w", c.address, err) } if _, err = rpcapi.Healthcheck(c.client, &tree.HealthcheckRequest{}, rpcclient.WithContext(ctx)); err != nil { - return fmt.Errorf("healthcheck tree service: %w", err) + return fmt.Errorf("healthcheck tree service '%s': %w", c.address, err) } c.healthy = true @@ -127,5 +127,9 @@ func (c *treeClient) close() error { if c.client == nil || c.client.Conn() == nil { return nil } - return c.client.Conn().Close() + err := c.client.Conn().Close() + if err != nil { + return fmt.Errorf("address '%s': %w", c.address, err) + } + return nil } diff --git a/pool/tree/pool.go b/pool/tree/pool.go index ddfdc0ef..dd5d8262 100644 --- a/pool/tree/pool.go +++ b/pool/tree/pool.go @@ -24,10 +24,12 @@ import ( ) const ( - defaultRebalanceInterval = 15 * time.Second - defaultHealthcheckTimeout = 4 * time.Second - defaultDialTimeout = 5 * time.Second - defaultStreamTimeout = 10 * time.Second + defaultRebalanceInterval = 15 * time.Second + defaultHealthcheckTimeout = 4 * time.Second + defaultDialTimeout = 5 * time.Second + defaultStreamTimeout = 10 * time.Second + defaultCircuitBreakerDuration = 10 * time.Second + defaultCircuitBreakerTreshold = 10 ) // SubTreeSort defines an order of nodes returned from GetSubTree RPC. @@ -76,6 +78,8 @@ type InitParameters struct { dialOptions []grpc.DialOption maxRequestAttempts int netMapInfoSource NetMapInfoSource + circuitBreakerThreshold int + circuitBreakerDuration time.Duration } type NetMapInfoSource interface { @@ -117,6 +121,8 @@ type Pool struct { // * retry in case of request failure (see Pool.requestWithRetry) // startIndices will be used if netMapInfoSource is not set startIndices [2]int + // circuit breaker for dial operations when netmap is being used + cb *circuitBreaker } type innerPool struct { @@ -248,6 +254,10 @@ func NewPool(options InitParameters) (*Pool, error) { methods: methods, netMapInfoSource: options.netMapInfoSource, clientMap: make(map[uint64]client), + cb: newCircuitBreaker( + options.circuitBreakerDuration, + options.circuitBreakerThreshold, + ), } if options.netMapInfoSource == nil { @@ -285,7 +295,7 @@ func (p *Pool) Dial(ctx context.Context) error { for j, node := range nodes { clients[j] = newTreeClient(node.Address(), p.dialOptions, p.nodeDialTimeout, p.streamTimeout) if err := clients[j].dial(ctx); err != nil { - p.log(zap.WarnLevel, "failed to dial tree client", zap.String("address", node.Address()), zap.Error(err)) + p.log(zap.WarnLevel, "failed to dial tree client", zap.Error(err)) continue } @@ -366,6 +376,18 @@ func (x *InitParameters) SetNetMapInfoSource(netMapInfoSource NetMapInfoSource) x.netMapInfoSource = netMapInfoSource } +// SetCircuitBreakerThreshold sets number of consecutive failed connection before +// circuit is considered closed and therefore return error immediately. 
+func (x *InitParameters) SetCircuitBreakerThreshold(circuitBreakerThreshold int) { + x.circuitBreakerThreshold = circuitBreakerThreshold +} + +// SetCircuitBreakerDuration sets duration for circuit to be considered closed. +// This effectively limits to one new connection try per duration. +func (x *InitParameters) SetCircuitBreakerDuration(circuitBreakerDuration time.Duration) { + x.circuitBreakerDuration = circuitBreakerDuration +} + // GetNodes invokes eponymous method from TreeServiceClient. // // Can return predefined errors: @@ -414,12 +436,19 @@ func (p *Pool) GetNodes(ctx context.Context, prm GetNodesParams) ([]*tree.GetNod // // Must be initialized using Pool.GetSubTree, any other usage is unsafe. type SubTreeReader struct { - cli *rpcapi.GetSubTreeResponseReader + cli *rpcapi.GetSubTreeResponseReader + probe *tree.GetSubTreeResponseBody } // Read reads another list of the subtree nodes. func (x *SubTreeReader) Read(buf []*tree.GetSubTreeResponseBody) (int, error) { - for i := range buf { + i := 0 + if x.probe != nil && len(buf) != 0 { + buf[0] = x.probe + x.probe = nil + i = 1 + } + for ; i < len(buf); i++ { var resp tree.GetSubTreeResponse err := x.cli.Read(&resp) if err == io.EOF { @@ -436,6 +465,10 @@ func (x *SubTreeReader) Read(buf []*tree.GetSubTreeResponseBody) (int, error) { // ReadAll reads all nodes subtree nodes. func (x *SubTreeReader) ReadAll() ([]*tree.GetSubTreeResponseBody, error) { var res []*tree.GetSubTreeResponseBody + if x.probe != nil { + res = append(res, x.probe) + x.probe = nil + } for { var resp tree.GetSubTreeResponse err := x.cli.Read(&resp) @@ -452,6 +485,12 @@ func (x *SubTreeReader) ReadAll() ([]*tree.GetSubTreeResponseBody, error) { // Next gets the next node from subtree. func (x *SubTreeReader) Next() (*tree.GetSubTreeResponseBody, error) { + if x.probe != nil { + res := x.probe + x.probe = nil + return res, nil + } + var resp tree.GetSubTreeResponse err := x.cli.Read(&resp) if err == io.EOF { @@ -495,16 +534,24 @@ func (p *Pool) GetSubTree(ctx context.Context, prm GetSubTreeParams) (*SubTreeRe } var cli *rpcapi.GetSubTreeResponseReader + var probeBody *tree.GetSubTreeResponseBody err := p.requestWithRetry(ctx, prm.CID, func(client *rpcclient.Client) (inErr error) { cli, inErr = rpcapi.GetSubTree(client, request, rpcclient.WithContext(ctx)) - return handleError("failed to get sub tree client", inErr) + if inErr != nil { + return handleError("failed to get sub tree client", inErr) + } + + probe := &tree.GetSubTreeResponse{} + inErr = cli.Read(probe) + probeBody = probe.GetBody() + return handleError("failed to get first resp from sub tree client", inErr) }) p.methods[methodGetSubTree].IncRequests(time.Since(start)) if err != nil { return nil, err } - return &SubTreeReader{cli: cli}, nil + return &SubTreeReader{cli: cli, probe: probeBody}, nil } // AddNode invokes eponymous method from TreeServiceClient. 
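(Aside, not part of the patch.) The GetSubTree hunk above now reads the first stream response eagerly inside requestWithRetry and buffers it as the probe, so a stream that fails on its very first message is retried on another node instead of being surfaced to the caller. A minimal consumer-side sketch under that assumption follows; the helper name drainSubTree and its error handling are illustrative, but it only uses identifiers visible in this diff (Pool.GetSubTree, GetSubTreeParams, SubTreeReader.Next, io.EOF termination).

package example

import (
	"context"
	"errors"
	"io"

	apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
)

// drainSubTree (hypothetical helper) collects every node of a subtree.
// The probe response pre-read during retries is transparent to the caller:
// Next simply yields it first and then continues with the rest of the stream.
func drainSubTree(ctx context.Context, p *treepool.Pool, cnr cid.ID) ([]*apitree.GetSubTreeResponseBody, error) {
	reader, err := p.GetSubTree(ctx, treepool.GetSubTreeParams{CID: cnr})
	if err != nil {
		return nil, err
	}

	var nodes []*apitree.GetSubTreeResponseBody
	for {
		body, err := reader.Next()
		if errors.Is(err, io.EOF) {
			return nodes, nil // stream exhausted
		}
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, body)
	}
}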
@@ -764,6 +811,14 @@ func fillDefaultInitParams(params *InitParameters) { if params.maxRequestAttempts <= 0 { params.maxRequestAttempts = len(params.nodeParams) } + + if params.circuitBreakerDuration <= 0 { + params.circuitBreakerDuration = defaultCircuitBreakerDuration + } + + if params.circuitBreakerThreshold <= 0 { + params.circuitBreakerThreshold = defaultCircuitBreakerTreshold + } } func (p *Pool) log(level zapcore.Level, msg string, fields ...zap.Field) { @@ -959,14 +1014,17 @@ LOOP: treeCl, ok := p.getClientFromMap(cnrNode.Hash()) if !ok { - treeCl, err = p.getNewTreeClient(ctx, cnrNode) + err = p.cb.Do(cnrNode.Hash(), func() error { + treeCl, err = p.getNewTreeClient(ctx, cnrNode) + return err + }) if err != nil { finErr = finalError(finErr, err) p.log(zap.DebugLevel, "failed to create tree client", zap.String("request_id", reqID), zap.Int("remaining attempts", attempts)) continue } - p.addClientToMap(cnrNode.Hash(), treeCl) + treeCl = p.addClientToMap(cnrNode.Hash(), treeCl) } attempts-- @@ -1001,47 +1059,55 @@ func (p *Pool) getClientFromMap(hash uint64) (client, bool) { return cl, ok } -func (p *Pool) addClientToMap(hash uint64, cl client) { +func (p *Pool) addClientToMap(hash uint64, cl client) client { p.mutex.Lock() + defer p.mutex.Unlock() + + if old, ok := p.clientMap[hash]; ok { + _ = cl.close() + return old + } p.clientMap[hash] = cl - p.mutex.Unlock() + return cl } func (p *Pool) deleteClientFromMap(hash uint64) { p.mutex.Lock() - _ = p.clientMap[hash].close() - delete(p.clientMap, hash) + if cli, ok := p.clientMap[hash]; ok { + _ = cli.close() + delete(p.clientMap, hash) + } p.mutex.Unlock() } func (p *Pool) getNewTreeClient(ctx context.Context, node netmap.NodeInfo) (*treeClient, error) { - var ( - treeCl *treeClient - err error - ) - - node.IterateNetworkEndpoints(func(endpoint string) bool { + for endpoint := range node.NetworkEndpoints() { var addr network.Address - if err = addr.FromString(endpoint); err != nil { + if err := addr.FromString(endpoint); err != nil { p.log(zap.WarnLevel, "can't parse endpoint", zap.String("endpoint", endpoint), zap.Error(err)) - return false + continue } newTreeCl := newTreeClient(addr.URIAddr(), p.dialOptions, p.nodeDialTimeout, p.streamTimeout) - if err = newTreeCl.dial(ctx); err != nil { - p.log(zap.WarnLevel, "failed to dial tree client", zap.String("address", addr.URIAddr()), zap.Error(err)) - return false + if err := newTreeCl.dial(ctx); err != nil { + p.log(zap.WarnLevel, "failed to dial tree client", zap.Error(err)) + + // We have to close connection here after failed `dial()`. + // This is NOT necessary in object pool and regular tree pool without netmap support, because: + // - object pool uses SDK object client which closes connection during `dial()` call by itself, + // - regular tree pool is going to reuse connection by calling `redialIfNecessary()`. + // Tree pool with netmap support does not operate with background goroutine, so we have to close connection immediately. 
+ if err = newTreeCl.close(); err != nil { + p.log(zap.WarnLevel, "failed to close recently dialed tree client", zap.Error(err)) + } + + continue } - treeCl = newTreeCl - return true - }) - - if treeCl == nil { - return nil, fmt.Errorf("tree client wasn't initialized") + return newTreeCl, nil } - return treeCl, nil + return nil, fmt.Errorf("tree client wasn't initialized") } func shouldTryAgain(err error) bool { diff --git a/pool/tree/pool_server_test.go b/pool/tree/pool_server_test.go new file mode 100644 index 00000000..01373dad --- /dev/null +++ b/pool/tree/pool_server_test.go @@ -0,0 +1,345 @@ +package tree + +import ( + "bytes" + "context" + "errors" + "io" + "net" + "runtime" + "strconv" + "testing" + + apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" + tree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +type mockTreeServer struct { + id int + srv *grpc.Server + lis net.Listener + key *keys.PrivateKey + + healthy bool + addCounter int + + getSubTreeError error + getSubTreeResponses []*tree.GetSubTreeResponse_Body + getSubTreeCounter int +} + +type mockNetmapSource struct { + servers []*mockTreeServer + policy string +} + +func (m *mockNetmapSource) NetMapSnapshot(context.Context) (netmap.NetMap, error) { + nm := netmap.NetMap{} + nodes := make([]netmap.NodeInfo, len(m.servers)) + for i, server := range m.servers { + ni := apinetmap.NodeInfo{} + ni.SetAddresses(server.lis.Addr().String()) + ni.SetPublicKey(server.key.PublicKey().Bytes()) + err := nodes[i].ReadFromV2(ni) // no other way to set address field in netmap.NodeInfo + if err != nil { + return nm, err + } + nodes[i].SetAttribute("id", strconv.Itoa(server.id)) + } + nm.SetNodes(nodes) + return nm, nil +} + +func (m *mockNetmapSource) PlacementPolicy(context.Context, cid.ID) (netmap.PlacementPolicy, error) { + p := netmap.PlacementPolicy{} + return p, p.DecodeString(m.policy) +} + +func (m *mockTreeServer) Serve() { + go m.srv.Serve(m.lis) +} + +func (m *mockTreeServer) Stop() { + m.srv.Stop() +} + +func (m *mockTreeServer) Addr() string { + return m.lis.Addr().String() +} + +func (m *mockTreeServer) Add(context.Context, *tree.AddRequest) (*tree.AddResponse, error) { + m.addCounter++ + return &tree.AddResponse{}, nil +} + +func (m *mockTreeServer) AddByPath(context.Context, *tree.AddByPathRequest) (*tree.AddByPathResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) Remove(context.Context, *tree.RemoveRequest) (*tree.RemoveResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) Move(context.Context, *tree.MoveRequest) (*tree.MoveResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) GetNodeByPath(context.Context, *tree.GetNodeByPathRequest) (*tree.GetNodeByPathResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) GetSubTree(_ *tree.GetSubTreeRequest, s tree.TreeService_GetSubTreeServer) error { + m.getSubTreeCounter++ + + if m.getSubTreeError != nil { + return m.getSubTreeError + } + + for i := range m.getSubTreeResponses { + if err := s.Send(&tree.GetSubTreeResponse{ + Body: 
m.getSubTreeResponses[i], + }); err != nil { + return err + } + } + + return nil +} + +func (m *mockTreeServer) TreeList(context.Context, *tree.TreeListRequest) (*tree.TreeListResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) Apply(context.Context, *tree.ApplyRequest) (*tree.ApplyResponse, error) { + panic("implement me") +} + +func (m *mockTreeServer) GetOpLog(*tree.GetOpLogRequest, tree.TreeService_GetOpLogServer) error { + panic("implement me") +} + +func (m *mockTreeServer) Healthcheck(context.Context, *tree.HealthcheckRequest) (*tree.HealthcheckResponse, error) { + if m.healthy { + return new(tree.HealthcheckResponse), nil + } + return nil, errors.New("not healthy") +} + +func createTestServer(t *testing.T, id int) *mockTreeServer { + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + key, err := keys.NewPrivateKey() + require.NoError(t, err) + + res := &mockTreeServer{ + id: id, + srv: grpc.NewServer(), + lis: lis, + key: key, + healthy: true, + } + + tree.RegisterTreeServiceServer(res.srv, res) + + return res +} + +func preparePoolWithNetmapSource(t *testing.T, n int, p string) (*Pool, []*mockTreeServer, *mockNetmapSource) { + poolInitParams := InitParameters{} + + servers := make([]*mockTreeServer, n) + for i := range servers { + servers[i] = createTestServer(t, i) + servers[i].healthy = true + servers[i].Serve() + poolInitParams.AddNode(pool.NewNodeParam(1, servers[i].Addr(), 1)) + } + + source := &mockNetmapSource{ + servers: servers, + policy: p, + } + + key, err := keys.NewPrivateKey() + require.NoError(t, err) + poolInitParams.SetKey(key) + poolInitParams.SetNetMapInfoSource(source) + + cli, err := NewPool(poolInitParams) + require.NoError(t, err) + + return cli, servers, source +} + +func sortServers(ctx context.Context, servers []*mockTreeServer, source *mockNetmapSource, cnr cid.ID) ([]*mockTreeServer, error) { + res := make([]*mockTreeServer, len(servers)) + snapshot, err := source.NetMapSnapshot(ctx) + if err != nil { + return nil, err + } + + policy, err := source.PlacementPolicy(ctx, cnr) + if err != nil { + return nil, err + } + + cnrNodes, err := snapshot.ContainerNodes(policy, cnr[:]) + if err != nil { + return nil, err + } + + priorityNodes, err := snapshot.PlacementVectors(cnrNodes, cnr[:]) + if err != nil { + return nil, err + } + + // find servers based on public key and store pointers in res + index := 0 + for i := range priorityNodes { + for j := range priorityNodes[i] { + key := priorityNodes[i][j].PublicKey() + for k := range servers { + if bytes.Equal(servers[k].key.PublicKey().Bytes(), key) { + res[index] = servers[k] + index++ + break + } + } + } + } + + return res, nil +} + +func TestConnectionLeak(t *testing.T) { + const ( + numberOfNodes = 4 + placementPolicy = "REP 2" + ) + + // Initialize gRPC servers and create pool with netmap source + treePool, servers, source := preparePoolWithNetmapSource(t, numberOfNodes, placementPolicy) + for i := range servers { + defer servers[i].Stop() + } + + cnr := cidtest.ID() + ctx := context.Background() + + // Make priority node for cnr unhealthy, so it is going to be redialled on every request + sortedServers, err := sortServers(ctx, servers, source, cnr) + require.NoError(t, err) + sortedServers[0].healthy = false + + // Make RPC and check that pool switched to healthy server + _, err = treePool.AddNode(context.Background(), AddNodeParams{CID: cnr}) + require.NoError(t, err) + require.Equal(t, 0, sortedServers[0].addCounter) // unhealthy + require.Equal(t, 1, 
sortedServers[1].addCounter) // healthy + + // Check that goroutines are not leaked during multiple requests + routinesBefore := runtime.NumGoroutine() + for i := 0; i < 1000; i++ { + _, err = treePool.AddNode(context.Background(), AddNodeParams{CID: cnr}) + require.NoError(t, err) + } + // not more than 1 extra goroutine is created due to async operations + require.LessOrEqual(t, runtime.NumGoroutine()-routinesBefore, 1) +} + +func TestStreamRetry(t *testing.T) { + const ( + numberOfNodes = 4 + placementPolicy = "REP 2" + ) + + expected := []*tree.GetSubTreeResponse_Body{ + { + NodeId: []uint64{1}, + }, + { + NodeId: []uint64{2}, + }, + { + NodeId: []uint64{3}, + }, + } + + // Initialize gRPC servers and create pool with netmap source + treePool, servers, source := preparePoolWithNetmapSource(t, numberOfNodes, placementPolicy) + defer func() { + for i := range servers { + servers[i].Stop() + } + }() + + cnr := cidtest.ID() + ctx := context.Background() + + sortedServers, err := sortServers(ctx, servers, source, cnr) + require.NoError(t, err) + + // Return expected response in last priority node, others return error + for i := range sortedServers { + if i == len(sortedServers)-1 { + sortedServers[i].getSubTreeResponses = expected + } else { + sortedServers[i].getSubTreeError = errors.New("tree not found") + } + } + + t.Run("read all", func(t *testing.T) { + reader, err := treePool.GetSubTree(ctx, GetSubTreeParams{CID: cnr}) + require.NoError(t, err) + + data, err := reader.ReadAll() + require.NoError(t, err) + + require.Len(t, data, len(expected)) + for i := range expected { + require.EqualValues(t, expected[i].GetNodeId(), data[i].GetNodeID()) + } + }) + + t.Run("next", func(t *testing.T) { + reader, err := treePool.GetSubTree(ctx, GetSubTreeParams{CID: cnr}) + require.NoError(t, err) + + for i := range expected { + resp, err := reader.Next() + require.NoError(t, err) + require.Equal(t, expected[i].GetNodeId(), resp.GetNodeID()) + } + + _, err = reader.Next() + require.ErrorIs(t, err, io.EOF) + }) + + t.Run("read", func(t *testing.T) { + reader, err := treePool.GetSubTree(ctx, GetSubTreeParams{CID: cnr}) + require.NoError(t, err) + + buf := make([]*apitree.GetSubTreeResponseBody, len(expected)) + _, err = reader.Read(buf) + require.NoError(t, err) + + require.Len(t, buf, len(expected)) + for i := range expected { + require.EqualValues(t, expected[i].GetNodeId(), buf[i].GetNodeID()) + } + }) + + for i := range servers { + // check we retried every available node in the pool three times + require.Equal(t, 3, servers[i].getSubTreeCounter) + } +} diff --git a/pool/tree/pool_test.go b/pool/tree/pool_test.go index 607e037b..5814a77c 100644 --- a/pool/tree/pool_test.go +++ b/pool/tree/pool_test.go @@ -10,6 +10,7 @@ import ( cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + netmaptest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" "git.frostfs.info/TrueCloudLab/hrw" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -416,6 +417,21 @@ func TestRetryContainerNodes(t *testing.T) { }) } +func TestDeleteClientTwice(t *testing.T) { + p := Pool{ + clientMap: makeClientMap([]netmap.NodeInfo{netmaptest.NodeInfo()}), + } + // emulate concurrent requests as consecutive requests + // to delete the same client from the map twice + for idToDelete := range p.clientMap { + p.deleteClientFromMap(idToDelete) +
require.NotPanics(t, func() { + p.deleteClientFromMap(idToDelete) + }) + } + require.Empty(t, p.clientMap) +} + func makeInnerPool(nodes [][]string) []*innerPool { res := make([]*innerPool, len(nodes)) diff --git a/user/id.go b/user/id.go index 2d64d43c..c77ae677 100644 --- a/user/id.go +++ b/user/id.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "github.com/mr-tron/base58" @@ -12,9 +13,8 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" ) -const idSize = 25 - -var zeroSlice = bytes.Repeat([]byte{0}, idSize) +// idFullSize is the size of ID in bytes, including prefix and checksum. +const idFullSize = util.Uint160Size + 5 // ID identifies users of the FrostFS system. // @@ -25,7 +25,7 @@ var zeroSlice = bytes.Repeat([]byte{0}, idSize) // so it MUST be initialized using some modifying function (e.g. SetScriptHash, // IDFromKey, etc.). type ID struct { - w []byte + w util.Uint160 } // ReadFromV2 reads ID from the refs.OwnerID message. Returns an error if @@ -33,22 +33,7 @@ type ID struct { // // See also WriteToV2. func (x *ID) ReadFromV2(m refs.OwnerID) error { - w := m.GetValue() - if len(w) != idSize { - return fmt.Errorf("invalid length %d, expected %d", len(w), idSize) - } - - if w[0] != address.NEO3Prefix { - return fmt.Errorf("invalid prefix byte 0x%X, expected 0x%X", w[0], address.NEO3Prefix) - } - - if !bytes.Equal(w[21:], hash.Checksum(w[:21])) { - return errors.New("checksum mismatch") - } - - x.w = w - - return nil + return x.setUserID(m.GetValue()) } // WriteToV2 writes ID to the refs.OwnerID message. @@ -56,25 +41,17 @@ func (x *ID) ReadFromV2(m refs.OwnerID) error { // // See also ReadFromV2. func (x ID) WriteToV2(m *refs.OwnerID) { - m.SetValue(x.w) + m.SetValue(x.WalletBytes()) } // SetScriptHash forms user ID from wallet address scripthash. func (x *ID) SetScriptHash(scriptHash util.Uint160) { - if cap(x.w) < idSize { - x.w = make([]byte, idSize) - } else if len(x.w) < idSize { - x.w = x.w[:idSize] - } - - x.w[0] = address.Prefix - copy(x.w[1:], scriptHash.BytesBE()) - copy(x.w[21:], hash.Checksum(x.w[:21])) + x.w = scriptHash } // ScriptHash calculates and returns script hash of ID. -func (x *ID) ScriptHash() (util.Uint160, error) { - return util.Uint160DecodeBytesBE(x.w[1:21]) +func (x *ID) ScriptHash() util.Uint160 { + return x.w } // WalletBytes returns FrostFS user ID as Neo3 wallet address in a binary format. @@ -83,14 +60,18 @@ func (x *ID) ScriptHash() (util.Uint160, error) { // // See also Neo3 wallet docs. func (x ID) WalletBytes() []byte { - return x.w + v := make([]byte, idFullSize) + v[0] = address.Prefix + copy(v[1:], x.w[:]) + copy(v[21:], hash.Checksum(v[:21])) + return v } // EncodeToString encodes ID into FrostFS API V2 protocol string. // // See also DecodeString. func (x ID) EncodeToString() string { - return base58.Encode(x.w) + return base58.Encode(x.WalletBytes()) } // DecodeString decodes FrostFS API V2 protocol string. Returns an error @@ -100,14 +81,11 @@ func (x ID) EncodeToString() string { // // See also EncodeToString. func (x *ID) DecodeString(s string) error { - var err error - - x.w, err = base58.Decode(s) + w, err := base58.Decode(s) if err != nil { return fmt.Errorf("decode base58: %w", err) } - - return nil + return x.setUserID(w) } // String implements fmt.Stringer. @@ -121,10 +99,34 @@ func (x ID) String() string { // Equals defines a comparison relation between two ID instances. 
func (x ID) Equals(x2 ID) bool { - return bytes.Equal(x.w, x2.w) + return x.w == x2.w } // IsEmpty returns True, if ID is empty value. func (x ID) IsEmpty() bool { - return bytes.Equal(zeroSlice, x.w) + return x.w == util.Uint160{} +} + +func (x *ID) setUserID(w []byte) error { + if len(w) != idFullSize { + return fmt.Errorf("invalid length %d, expected %d", len(w), idFullSize) + } + + if w[0] != address.NEO3Prefix { + return fmt.Errorf("invalid prefix byte 0x%X, expected 0x%X", w[0], address.NEO3Prefix) + } + + if !bytes.Equal(w[21:], hash.Checksum(w[:21])) { + return errors.New("checksum mismatch") + } + + copy(x.w[:], w[1:21]) + + return nil +} + +// Cmp returns an integer comparing two base58-encoded user IDs lexicographically. +// The result will be 0 if id1 == id2, -1 if id1 < id2, and +1 if id1 > id2. +func (x ID) Cmp(x2 ID) int { + return strings.Compare(x.EncodeToString(), x2.EncodeToString()) } diff --git a/user/id_test.go b/user/id_test.go index afeb7465..c253336b 100644 --- a/user/id_test.go +++ b/user/id_test.go @@ -3,6 +3,8 @@ package user_test import ( "bytes" "crypto/rand" + "slices" + "strings" "testing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" @@ -51,8 +53,7 @@ func TestID_SetScriptHash(t *testing.T) { func TestID_ScriptHash(t *testing.T) { userID := usertest.ID() - scriptHash, err := userID.ScriptHash() - require.NoError(t, err) + scriptHash := userID.ScriptHash() ownerAddress := userID.EncodeToString() decodedScriptHash, err := address.StringToUint160(ownerAddress) @@ -133,3 +134,16 @@ func TestID_Equal(t *testing.T) { require.True(t, id3.Equals(id1)) // commutativity require.False(t, id1.Equals(id2)) } + +func TestID_Cmp(t *testing.T) { + id1 := usertest.ID() + id2 := usertest.ID() + id3 := usertest.ID() + + arr := []ID{id1, id2, id3} + + slices.SortFunc(arr, ID.Cmp) + for i := 1; i < len(arr); i++ { + require.NotEqual(t, strings.Compare(arr[i-1].EncodeToString(), arr[i].EncodeToString()), 1, "array is not sorted correctly") + } +}
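(Aside, not part of the patch.) The new user.ID.Cmp compares the base58-encoded forms of two IDs, which is exactly what TestID_Cmp above relies on; because its signature matches the comparator expected by slices.SortFunc, it can be passed as a method expression. A small hypothetical usage sketch, with made-up script-hash values:

package example

import (
	"fmt"
	"slices"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// sortOwners sorts user IDs by their base58-encoded (address) form using the new Cmp method.
func sortOwners() {
	var a, b user.ID
	// Script hash values are arbitrary, chosen only for this sketch.
	a.SetScriptHash(util.Uint160{0x01})
	b.SetScriptHash(util.Uint160{0xfe})

	ids := []user.ID{b, a}
	slices.SortFunc(ids, user.ID.Cmp) // lexicographic order of EncodeToString()

	for _, id := range ids {
		fmt.Println(id.EncodeToString())
	}
}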