[#1867] services/control: Allow to provide multiple shard IDs to some commands

Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
Evgenii Stratonikov 2022-10-10 20:54:14 +03:00 committed by fyrchik
parent 74d2f2c8d3
commit 19c0a74e94
14 changed files with 178 additions and 111 deletions

View File

@@ -20,7 +20,7 @@ func evacuateShard(cmd *cobra.Command, _ []string) {
 	pk := key.Get(cmd)
 
 	req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
-	req.Body.Shard_ID = getShardID(cmd)
+	req.Body.Shard_ID = [][]byte{getShardID(cmd)}
 	req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
 
 	signRequest(cmd, pk, req)
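
Note: the CLI side is intentionally untouched here; it still reads one shard ID and wraps it in a single-element list, and the same applies to the flush-cache and set-mode commands below. A sketch of how a command could collect several IDs once the flag is made repeatable; the flag name, the helper, and the use of GetStringSlice are assumptions for illustration, not part of this commit (shard IDs are passed base58-encoded on the command line):

func readShardIDFlags(cmd *cobra.Command) ([][]byte, error) {
	// Hypothetical repeatable flag: --id may be given multiple times.
	sidList, _ := cmd.Flags().GetStringSlice("id")

	res := make([][]byte, 0, len(sidList))
	for i := range sidList {
		raw, err := base58.Decode(sidList[i]) // github.com/mr-tron/base58
		if err != nil {
			return nil, fmt.Errorf("incorrect shard ID encoding: %w", err)
		}
		res = append(res, raw)
	}
	return res, nil
}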

View File

@@ -20,7 +20,7 @@ func flushCache(cmd *cobra.Command, _ []string) {
 	pk := key.Get(cmd)
 
 	req := &control.FlushCacheRequest{Body: new(control.FlushCacheRequest_Body)}
-	req.Body.Shard_ID = getShardID(cmd)
+	req.Body.Shard_ID = [][]byte{getShardID(cmd)}
 
 	signRequest(cmd, pk, req)

View File

@@ -71,7 +71,7 @@ func setShardMode(cmd *cobra.Command, _ []string) {
 	req.SetBody(body)
 
 	body.SetMode(mode)
-	body.SetShardID(getShardID(cmd))
+	body.SetShardIDList([][]byte{getShardID(cmd)})
 
 	reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
 	body.ClearErrorCounter(reset)

View File

@@ -15,7 +15,7 @@ import (
 
 // EvacuateShardPrm represents parameters for the EvacuateShard operation.
 type EvacuateShardPrm struct {
-	shardID      *shard.ID
+	shardID      []*shard.ID
 	handler      func(oid.Address, *objectSDK.Object) error
 	ignoreErrors bool
 }
@@ -25,8 +25,8 @@ type EvacuateShardRes struct {
 	count int
 }
 
-// WithShardID sets shard ID.
-func (p *EvacuateShardPrm) WithShardID(id *shard.ID) {
+// WithShardIDList sets shard ID.
+func (p *EvacuateShardPrm) WithShardIDList(id []*shard.ID) {
 	p.shardID = id
 }
 
@@ -53,30 +53,35 @@ type pooledShard struct {
 	pool util.WorkerPool
 }
 
-var errMustHaveTwoShards = errors.New("amount of shards must be > 2")
+var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
 
 // Evacuate moves data from one shard to the others.
 // The shard being moved must be in read-only mode.
 func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error) {
-	sid := prm.shardID.String()
+	sidList := make([]string, len(prm.shardID))
+	for i := range prm.shardID {
+		sidList[i] = prm.shardID[i].String()
+	}
 
 	e.mtx.RLock()
-	sh, ok := e.shards[sid]
-	if !ok {
-		e.mtx.RUnlock()
-		return EvacuateShardRes{}, errShardNotFound
+	for i := range sidList {
+		sh, ok := e.shards[sidList[i]]
+		if !ok {
+			e.mtx.RUnlock()
+			return EvacuateShardRes{}, errShardNotFound
+		}
+
+		if !sh.GetMode().ReadOnly() {
+			e.mtx.RUnlock()
+			return EvacuateShardRes{}, shard.ErrMustBeReadOnly
+		}
 	}
 
-	if len(e.shards) < 2 && prm.handler == nil {
+	if len(e.shards)-len(sidList) < 1 && prm.handler == nil {
 		e.mtx.RUnlock()
 		return EvacuateShardRes{}, errMustHaveTwoShards
 	}
 
-	if !sh.GetMode().ReadOnly() {
-		e.mtx.RUnlock()
-		return EvacuateShardRes{}, shard.ErrMustBeReadOnly
-	}
-
 	// We must have all shards, to have correct information about their
 	// indexes in a sorted slice and set appropriate marks in the metabase.
 	// Evacuated shard is skipped during put.
@@ -94,72 +94,89 @@ func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error)
 		weights = append(weights, e.shardWeight(shards[i].Shard))
 	}
 
+	shardMap := make(map[string]*shard.Shard)
+	for i := range sidList {
+		for j := range shards {
+			if shards[j].ID().String() == sidList[i] {
+				shardMap[sidList[i]] = shards[j].Shard
+			}
+		}
+	}
+
 	var listPrm shard.ListWithCursorPrm
 	listPrm.WithCount(defaultEvacuateBatchSize)
 
-	var c *meta.Cursor
 	var res EvacuateShardRes
-	for {
-		listPrm.WithCursor(c)
-
-		// TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
-		// because ListWithCursor works only with the metabase.
-		listRes, err := sh.Shard.ListWithCursor(listPrm)
-		if err != nil {
-			if errors.Is(err, meta.ErrEndOfListing) {
-				return res, nil
-			}
-			return res, err
-		}
-
-		// TODO (@fyrchik): #1731 parallelize the loop
-		lst := listRes.AddressList()
-
-	loop:
-		for i := range lst {
-			var getPrm shard.GetPrm
-			getPrm.SetAddress(lst[i])
-
-			getRes, err := sh.Get(getPrm)
-			if err != nil {
-				if prm.ignoreErrors {
-					continue
-				}
-				return res, err
-			}
-
-			hrw.SortSliceByWeightValue(shards, weights, hrw.Hash([]byte(lst[i].EncodeToString())))
-			for j := range shards {
-				if shards[j].ID().String() == sid {
-					continue
-				}
-				putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, lst[i], getRes.Object())
-				if putDone || exists {
-					if putDone {
-						e.log.Debug("object is moved to another shard",
-							zap.String("from", sid),
-							zap.Stringer("to", shards[j].ID()),
-							zap.Stringer("addr", lst[i]))
-
-						res.count++
-					}
-					continue loop
-				}
-			}
-
-			if prm.handler == nil {
-				// Do not check ignoreErrors flag here because
-				// ignoring errors on put make this command kinda useless.
-				return res, fmt.Errorf("%w: %s", errPutShard, lst[i])
-			}
-
-			err = prm.handler(lst[i], getRes.Object())
-			if err != nil {
-				return res, err
-			}
-			res.count++
-		}
-
-		c = listRes.Cursor()
-	}
+
+mainLoop:
+	for n := range sidList {
+		sh := shardMap[sidList[n]]
+
+		var c *meta.Cursor
+		for {
+			listPrm.WithCursor(c)
+
+			// TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
+			// because ListWithCursor works only with the metabase.
+			listRes, err := sh.ListWithCursor(listPrm)
+			if err != nil {
+				if errors.Is(err, meta.ErrEndOfListing) {
+					continue mainLoop
+				}
+				return res, err
+			}
+
+			// TODO (@fyrchik): #1731 parallelize the loop
+			lst := listRes.AddressList()
+
+		loop:
+			for i := range lst {
+				var getPrm shard.GetPrm
+				getPrm.SetAddress(lst[i])

+				getRes, err := sh.Get(getPrm)
+				if err != nil {
+					if prm.ignoreErrors {
+						continue
+					}
+					return res, err
+				}
+
+				hrw.SortSliceByWeightValue(shards, weights, hrw.Hash([]byte(lst[i].EncodeToString())))
+				for j := range shards {
+					if _, ok := shardMap[shards[j].ID().String()]; ok {
+						continue
+					}
+					putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, lst[i], getRes.Object())
+					if putDone || exists {
+						if putDone {
+							e.log.Debug("object is moved to another shard",
+								zap.String("from", sidList[n]),
+								zap.Stringer("to", shards[j].ID()),
+								zap.Stringer("addr", lst[i]))
+
+							res.count++
+						}
+						continue loop
+					}
+				}
+
+				if prm.handler == nil {
+					// Do not check ignoreErrors flag here because
+					// ignoring errors on put make this command kinda useless.
+					return res, fmt.Errorf("%w: %s", errPutShard, lst[i])
+				}
+
+				err = prm.handler(lst[i], getRes.Object())
+				if err != nil {
+					return res, err
+				}
+				res.count++
+			}
+
+			c = listRes.Cursor()
+		}
+	}
+
+	return res, nil
 }
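
Note: taken together, the reworked checks require every listed shard to be read-only before evacuation starts, and, unless a fault handler is installed, at least one shard outside the list must remain to receive the objects. A minimal usage sketch, assuming an initialized engine and the IDs of shards previously added to it:

import (
	"fmt"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine"
	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
)

func evacuateShards(e *engine.StorageEngine, ids []*shard.ID) error {
	var prm engine.EvacuateShardPrm
	prm.WithShardIDList(ids)   // every listed shard must be read-only
	prm.WithIgnoreErrors(true) // skip objects that cannot be read

	res, err := e.Evacuate(prm) // fails if no spare shard remains
	if err != nil {
		return err
	}
	fmt.Println("objects moved:", res.Count())
	return nil
}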

View File

@@ -90,7 +90,7 @@ func TestEvacuateShard(t *testing.T) {
 	checkHasObjects(t)
 
 	var prm EvacuateShardPrm
-	prm.WithShardID(ids[2])
+	prm.WithShardIDList(ids[2:3])
 
 	t.Run("must be read-only", func(t *testing.T) {
 		res, err := e.Evacuate(prm)
@@ -154,7 +154,7 @@ func TestEvacuateNetwork(t *testing.T) {
 		require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
 
 		var prm EvacuateShardPrm
-		prm.shardID = ids[0]
+		prm.shardID = ids[0:1]
 
 		res, err := e.Evacuate(prm)
 		require.ErrorIs(t, err, errMustHaveTwoShards)
@@ -166,14 +166,14 @@ func TestEvacuateNetwork(t *testing.T) {
 		require.ErrorIs(t, err, errReplication)
 		require.Equal(t, 2, res.Count())
 	})
-	t.Run("multiple shards", func(t *testing.T) {
+	t.Run("multiple shards, evacuate one", func(t *testing.T) {
 		e, ids, objects := newEngineEvacuate(t, 2, 3)
 		require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
 		require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
 
 		var prm EvacuateShardPrm
-		prm.shardID = ids[1]
+		prm.shardID = ids[1:2]
 		prm.handler = acceptOneOf(objects, 2)
 
 		res, err := e.Evacuate(prm)
@@ -188,4 +188,36 @@ func TestEvacuateNetwork(t *testing.T) {
 			require.Equal(t, 3, res.Count())
 		})
 	})
+	t.Run("multiple shards, evacuate many", func(t *testing.T) {
+		e, ids, objects := newEngineEvacuate(t, 4, 5)
+		evacuateIDs := ids[0:3]
+
+		var totalCount int
+		for i := range evacuateIDs {
+			res, err := e.shards[ids[i].String()].List()
+			require.NoError(t, err)
+			totalCount += len(res.AddressList())
+		}
+
+		for i := range ids {
+			require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly))
+		}
+
+		var prm EvacuateShardPrm
+		prm.shardID = evacuateIDs
+		prm.handler = acceptOneOf(objects, totalCount-1)
+
+		res, err := e.Evacuate(prm)
+		require.ErrorIs(t, err, errReplication)
+		require.Equal(t, totalCount-1, res.Count())
+
+		t.Run("no errors", func(t *testing.T) {
+			prm.handler = acceptOneOf(objects, totalCount)
+
+			res, err := e.Evacuate(prm)
+			require.NoError(t, err)
+			require.Equal(t, totalCount, res.Count())
+		})
+	})
 }

View File

@@ -9,7 +9,6 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine"
-	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/services/control"
 	"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
 	"github.com/nspcc-dev/neofs-node/pkg/services/replicator"
@@ -26,10 +25,8 @@ func (s *Server) EvacuateShard(_ context.Context, req *control.EvacuateShardRequ
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
 
-	shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
-
 	var prm engine.EvacuateShardPrm
-	prm.WithShardID(shardID)
+	prm.WithShardIDList(getShardIDList(req.GetBody().GetShard_ID()))
 	prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
 	prm.WithFaultHandler(s.replicate)

View File

@@ -4,7 +4,6 @@ import (
 	"context"
 
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine"
-	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/services/control"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -16,14 +15,14 @@ func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
 
-	shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
+	for _, shardID := range getShardIDList(req.GetBody().GetShard_ID()) {
+		var prm engine.FlushWriteCachePrm
+		prm.SetShardID(shardID)
 
-	var prm engine.FlushWriteCachePrm
-	prm.SetShardID(shardID)
-
-	_, err = s.s.FlushWriteCache(prm)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
+		_, err = s.s.FlushWriteCache(prm)
+		if err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
 	}
 
 	resp := &control.FlushCacheResponse{Body: &control.FlushCacheResponse_Body{}}

View File

@@ -0,0 +1,11 @@
+package control
+
+import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
+
+func getShardIDList(raw [][]byte) []*shard.ID {
+	res := make([]*shard.ID, 0, len(raw))
+	for i := range raw {
+		res = append(res, shard.NewIDFromBytes(raw[i]))
+	}
+	return res
+}
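
Note: this new helper is the single point where raw request bytes become typed shard IDs; the engine then keys its shard map by id.String(), the base58 form. Illustration only, assuming a request req with a populated body:

ids := getShardIDList(req.GetBody().GetShard_ID())
for _, id := range ids {
	fmt.Println("requested shard", id) // *shard.ID prints as base58 via String()
}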

View File

@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard/mode"
 	"github.com/nspcc-dev/neofs-node/pkg/services/control"
 	"google.golang.org/grpc/codes"
@@ -21,8 +20,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
 	var (
 		m mode.Mode
 
-		requestedMode  = req.GetBody().GetMode()
-		requestedShard = shard.NewIDFromBytes(req.Body.GetShard_ID())
+		requestedMode = req.GetBody().GetMode()
 	)
 
 	switch requestedMode {
@@ -38,9 +36,11 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
 		return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode))
 	}
 
-	err = s.s.SetShardMode(requestedShard, m, false)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
+	for _, shardID := range getShardIDList(req.Body.GetShard_ID()) {
+		err = s.s.SetShardMode(shardID, m, false)
+		if err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
 	}
 
 	// create and fill response
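
Note: FlushCache and SetShardMode now share one shape: decode the repeated shard_ID field, then apply the engine operation shard by shard, returning on the first failure, so shards processed earlier keep their new state. A generic sketch of that pattern; the helper name is illustrative, not part of this commit:

func forEachShard(raw [][]byte, op func(*shard.ID) error) error {
	for _, id := range getShardIDList(raw) {
		if err := op(id); err != nil {
			return err // earlier shards are already flushed or switched
		}
	}
	return nil
}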

View File

@@ -91,8 +91,8 @@ func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
 	}
 }
 
-// SetShardID sets shard ID whose mode is requested to be set.
-func (x *SetShardModeRequest_Body) SetShardID(v []byte) {
+// SetShardIDList sets shard ID whose mode is requested to be set.
+func (x *SetShardModeRequest_Body) SetShardIDList(v [][]byte) {
 	if v != nil {
 		x.Shard_ID = v
 	}

View File

@@ -1541,7 +1541,7 @@ type SetShardModeRequest_Body struct {
 	unknownFields protoimpl.UnknownFields
 
 	// ID of the shard.
-	Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
 	// Mode that requested to be set.
 	Mode ShardMode `protobuf:"varint,2,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
 	// Flag signifying whether error counter should be set to 0.
@@ -1580,7 +1580,7 @@ func (*SetShardModeRequest_Body) Descriptor() ([]byte, []int) {
 	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8, 0}
 }
 
-func (x *SetShardModeRequest_Body) GetShard_ID() []byte {
+func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
 	if x != nil {
 		return x.Shard_ID
 	}
@@ -1963,7 +1963,7 @@ type EvacuateShardRequest_Body struct {
 	unknownFields protoimpl.UnknownFields
 
 	// ID of the shard.
-	Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
 	// Flag indicating whether object read errors should be ignored.
 	IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
 }
@@ -2000,7 +2000,7 @@ func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
 	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
 }
 
-func (x *EvacuateShardRequest_Body) GetShard_ID() []byte {
+func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
 	if x != nil {
 		return x.Shard_ID
 	}
@@ -2069,7 +2069,7 @@ type FlushCacheRequest_Body struct {
 	unknownFields protoimpl.UnknownFields
 
 	// ID of the shard.
-	Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
 }
 
 func (x *FlushCacheRequest_Body) Reset() {
@@ -2104,7 +2104,7 @@ func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
 	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
 }
 
-func (x *FlushCacheRequest_Body) GetShard_ID() []byte {
+func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
 	if x != nil {
 		return x.Shard_ID
 	}
@@ -2251,7 +2251,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
 	0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
 	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
 	0x72, 0x65, 0x1a, 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68,
-	0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68,
+	0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68,
 	0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
 	0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x68,
 	0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a,
@@ -2344,7 +2344,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
 	0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
 	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
 	0x65, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
+	0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
 	0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
 	0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
 	0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76,
@@ -2367,7 +2367,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
 	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09,
 	0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64,
 	0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a,
+	0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a,
 	0x12, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
 	0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
 	0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73,

View File

@@ -160,7 +160,7 @@ message SetShardModeRequest {
     // Request body structure.
     message Body {
         // ID of the shard.
-        bytes shard_ID = 1;
+        repeated bytes shard_ID = 1;
 
         // Mode that requested to be set.
         ShardMode mode = 2;
@@ -294,7 +294,7 @@ message EvacuateShardRequest {
     // Request body structure.
     message Body {
         // ID of the shard.
-        bytes shard_ID = 1;
+        repeated bytes shard_ID = 1;
 
         // Flag indicating whether object read errors should be ignored.
         bool ignore_errors = 2;
@@ -320,7 +320,7 @@ message FlushCacheRequest {
     // Request body structure.
     message Body {
         // ID of the shard.
-        bytes shard_ID = 1;
+        repeated bytes shard_ID = 1;
     }
 
     Body body = 1;
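
Note: changing bytes to repeated bytes keeps field number 1 and the length-delimited wire type, so an old single-ID request still decodes as a one-element list, and a new one-element request is byte-identical to the old encoding. A self-contained sketch of the wire form (varint length simplified for short IDs):

package main

import "fmt"

// encodeShardIDs hand-encodes repeated bytes field 1: the same 0x0a tag
// (field 1, wire type 2) is simply emitted once per element.
func encodeShardIDs(ids [][]byte) []byte {
	var buf []byte
	for _, id := range ids {
		buf = append(buf, 0x0a)          // field 1, length-delimited
		buf = append(buf, byte(len(id))) // varint length; fine for short IDs
		buf = append(buf, id...)
	}
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeShardIDs([][]byte{{1, 2, 3}}))      // 0a 03 01 02 03
	fmt.Printf("% x\n", encodeShardIDs([][]byte{{1, 2}, {3, 4}})) // 0a 02 01 02 0a 02 03 04
}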

View File

@@ -618,7 +618,7 @@ func (x *ListShardsResponse) SetSignature(sig *Signature) {
 //
 // Structures with the same field values have the same binary size.
 func (x *SetShardModeRequest_Body) StableSize() (size int) {
-	size += proto.BytesSize(1, x.Shard_ID)
+	size += proto.RepeatedBytesSize(1, x.Shard_ID)
 	size += proto.EnumSize(2, int32(x.Mode))
 	size += proto.BoolSize(3, x.ResetErrorCounter)
 	return size
@@ -640,7 +640,7 @@ func (x *SetShardModeRequest_Body) StableMarshal(buf []byte) []byte {
 		buf = make([]byte, x.StableSize())
 	}
 
 	var offset int
 
-	offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
+	offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
 	offset += proto.EnumMarshal(2, buf[offset:], int32(x.Mode))
 	offset += proto.BoolMarshal(3, buf[offset:], x.ResetErrorCounter)
 
 	return buf
@@ -1238,7 +1238,7 @@ func (x *SynchronizeTreeResponse) SetSignature(sig *Signature) {
 //
 // Structures with the same field values have the same binary size.
 func (x *EvacuateShardRequest_Body) StableSize() (size int) {
-	size += proto.BytesSize(1, x.Shard_ID)
+	size += proto.RepeatedBytesSize(1, x.Shard_ID)
 	size += proto.BoolSize(2, x.IgnoreErrors)
 	return size
 }
@@ -1259,7 +1259,7 @@ func (x *EvacuateShardRequest_Body) StableMarshal(buf []byte) []byte {
 		buf = make([]byte, x.StableSize())
 	}
 
 	var offset int
 
-	offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
+	offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
 	offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
 
 	return buf
 }
@@ -1400,7 +1400,7 @@ func (x *EvacuateShardResponse) SetSignature(sig *Signature) {
 //
 // Structures with the same field values have the same binary size.
 func (x *FlushCacheRequest_Body) StableSize() (size int) {
-	size += proto.BytesSize(1, x.Shard_ID)
+	size += proto.RepeatedBytesSize(1, x.Shard_ID)
 	return size
 }
@@ -1420,7 +1420,7 @@ func (x *FlushCacheRequest_Body) StableMarshal(buf []byte) []byte {
 		buf = make([]byte, x.StableSize())
 	}
 
 	var offset int
 
-	offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
+	offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
 
 	return buf
 }

View File

@@ -139,17 +139,23 @@ func TestSetShardModeRequest_Body_StableMarshal(t *testing.T) {
 func generateSetShardModeRequestBody() *control.SetShardModeRequest_Body {
 	body := new(control.SetShardModeRequest_Body)
-	body.SetShardID([]byte{0, 1, 2, 3, 4})
+	body.SetShardIDList([][]byte{{0, 1, 2, 3, 4}})
 	body.SetMode(control.ShardMode_READ_WRITE)
 
 	return body
 }
 
 func equalSetShardModeRequestBodies(b1, b2 *control.SetShardModeRequest_Body) bool {
-	if b1.GetMode() != b2.GetMode() || !bytes.Equal(b1.Shard_ID, b2.Shard_ID) {
+	if b1.GetMode() != b2.GetMode() || len(b1.Shard_ID) != len(b2.Shard_ID) {
 		return false
 	}
+
+	for i := range b1.Shard_ID {
+		if !bytes.Equal(b1.Shard_ID[i], b2.Shard_ID[i]) {
+			return false
+		}
+	}
+
 	return true
 }
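
Note: the element-wise loop is required because bytes.Equal compares single slices; it also treats a nil ID and a zero-length ID as equal, which is the right laxity after a marshal round trip. A sketch of the round-trip check such a helper supports, assuming the generated body type implements proto.Message (google.golang.org/protobuf/proto); this is an illustration, not necessarily the file's actual test:

func TestSetShardModeBodyRoundTrip(t *testing.T) {
	body := generateSetShardModeRequestBody()

	var parsed control.SetShardModeRequest_Body
	require.NoError(t, proto.Unmarshal(body.StableMarshal(nil), &parsed))
	require.True(t, equalSetShardModeRequestBodies(body, &parsed))
}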