engine: Allow to remove redundant object copies #191

Merged
fyrchik merged 3 commits from fyrchik/frostfs-node:shard-reinsertion into master 2023-04-07 17:25:51 +00:00
13 changed files with 1237 additions and 236 deletions

@ -0,0 +1,53 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/spf13/cobra"
)
const (
concurrencyFlag = "concurrency"
removeDuplicatesFlag = "remove-duplicates"
)
var doctorCmd = &cobra.Command{
Use: "doctor",
Short: "Restructure node's storage",
Long: "Restructure node's storage",
Run: doctor,
}
func doctor(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.DoctorRequest{Body: new(control.DoctorRequest_Body)}
req.Body.Concurrency, _ = cmd.Flags().GetUint32(concurrencyFlag)
req.Body.RemoveDuplicates, _ = cmd.Flags().GetBool(removeDuplicatesFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.DoctorResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.Doctor(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Operation has finished.")
}
func initControlDoctorCmd() {
initControlFlags(doctorCmd)
ff := doctorCmd.Flags()
ff.Uint32(concurrencyFlag, 0, "Number of parallel threads to use")
ff.Bool(removeDuplicatesFlag, false, "Remove duplicate objects")
}

@ -17,6 +17,7 @@ func initControlShardsCmd() {
shardsCmd.AddCommand(restoreShardCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
@ -24,4 +25,5 @@ func initControlShardsCmd() {
initControlRestoreShardCmd()
initControlEvacuateShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
}
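With this registration the operation is wired into the existing command tree, so it should become reachable as a `shards` subcommand of the control CLI, e.g. `frostfs-cli control shards doctor --remove-duplicates --concurrency 16`. The flag names come from the file above; the exact invocation is an assumption based on the command hierarchy shown here.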

@ -17,6 +17,8 @@ import (
type StorageEngine struct {
*cfg
removeDuplicatesInProgress atomic.Bool
mtx *sync.RWMutex
shards map[string]hashedShard

@ -0,0 +1,138 @@
package engine
import (
"context"
"errors"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
// errRemoveDuplicatesInProgress is returned when another rebalancing is in progress.
// We need it because rebalancing (`RemoveDuplicates`) removes objects, and executing it concurrently
// on 2 shards can lead to data loss. In the future this restriction could be relaxed.
var errRemoveDuplicatesInProgress = errors.New("redundant copies removal is already in progress")
const defaultRemoveDuplicatesConcurrency = 256
type RemoveDuplicatesPrm struct {
Concurrency int
}
// RemoveDuplicates iterates over all objects and removes duplicate object copies
// from the shards that rank lower in the HRW sort.
// Safety:
// 1. Concurrent execution is prohibited, so at least 1 object copy is always left.
// 2. If an object is deleted from another thread, this is not a problem. Currently,
// there are 2 threads that can remove "valid" (non-expired and logically non-removed) objects:
// the policer and rebalance. For rebalance, see (1).
// If the policer removes something, we do not care whether both copies are removed or only one is left,
// as the remaining copy will be removed during the next policer iteration.
func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicatesPrm) error {
if !e.removeDuplicatesInProgress.CompareAndSwap(false, true) {
return errRemoveDuplicatesInProgress
}
defer e.removeDuplicatesInProgress.Store(false)
if prm.Concurrency <= 0 {
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
e.log.Info("starting removal of locally-redundant copies",
zap.Int("concurrency", prm.Concurrency))
// The mutex must be held for the whole duration to prevent a target shard from being removed
// concurrently: that could lead to data loss.
e.mtx.RLock()
defer e.mtx.RUnlock()
// Iterate shard by shard to be sure that no objects from 2 different shards are removed simultaneously.
// Currently this cannot happen anyway, because the `FreeSpace` metric used for weight sorting is always 0.
// However, the weights could change in the future, and this invariant could easily be overlooked.
for _, sh := range e.shards {
e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
errG.SetLimit(prm.Concurrency + 1) // +1 for the listing thread
errG.Go(func() error {
defer close(ch)
var cursor *meta.Cursor
for {
var listPrm shard.ListWithCursorPrm
listPrm.WithCount(uint32(prm.Concurrency))
listPrm.WithCursor(cursor)
res, err := sh.ListWithCursor(listPrm)
if err != nil {
if errors.Is(err, meta.ErrEndOfListing) {
return nil
}
return err
}
for _, addr := range res.AddressList() {
select {
case <-ctx.Done():
return ctx.Err()
case ch <- addr.Address:
}
}
cursor = res.Cursor()
}
})
for i := 0; i < prm.Concurrency; i++ {
errG.Go(func() error {
return e.removeObjects(ctx, ch)
})
}
if err := errG.Wait(); err != nil {
e.log.Error("finished removal of locally-redundant copies", zap.Error(err))
return err
}
}
e.log.Info("finished removal of locally-redundant copies")
return nil
}
// removeObjects reads addresses from ch and, for each address, removes every copy
// except the one on the first shard (in HRW order) where the object exists.
func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address) error {
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
shards = append(shards, sh)
}
for addr := range ch {
h := hrw.Hash([]byte(addr.EncodeToString()))
shards := sortShardsByWeight(shards, h)
found := false
for i := range shards {
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addr)
res, err := shards[i].Exists(existsPrm)
if err != nil {
return err
} else if !res.Exists() {
continue
} else if !found {
found = true
continue
}
var deletePrm shard.DeletePrm
deletePrm.SetAddresses(addr)
_, err = shards[i].Delete(deletePrm)
if err != nil {
return err
}
}
}
return nil
}
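The keep/drop decision above depends only on the HRW ranking of the shards for a given address, so it stays deterministic between runs as long as the shard set is unchanged. The following toy sketch illustrates the idea with FNV hashing and hypothetical shard IDs; the engine itself uses the TrueCloudLab/hrw package together with shard weights, as shown in the sortShardsByWeight diff below.

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// rank computes an illustrative rendezvous (HRW) score for a shard/address pair.
func rank(shardID, addr string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(shardID + "|" + addr))
	return h.Sum64()
}

func main() {
	shards := []string{"shard-1", "shard-2", "shard-3"} // hypothetical shard IDs
	addr := "container/object"                          // hypothetical object address

	// Sort shards by descending score: the first shard keeps its copy,
	// every other shard may safely drop its duplicate.
	sort.Slice(shards, func(i, j int) bool {
		return rank(shards[i], addr) > rank(shards[j], addr)
	})

	fmt.Println("keep copy on:      ", shards[0])
	fmt.Println("drop duplicates on:", shards[1:])
}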

@ -0,0 +1,208 @@
package engine
import (
"context"
"sync"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
)
func TestRebalance(t *testing.T) {
te := newEngineWithErrorThreshold(t, "", 0)
const (
objCount = 20
copyCount = (objCount + 2) / 3
)
type objectWithShard struct {
bestShard shard.ID
worstShard shard.ID
object *objectSDK.Object
}
objects := make([]objectWithShard, objCount)
for i := range objects {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
objects[i].object = obj
shards := te.ng.sortShardsByWeight(object.AddressOf(obj))
objects[i].bestShard = *shards[0].Shard.ID()
objects[i].worstShard = *shards[1].Shard.ID()
}
for i := range objects {
var prm shard.PutPrm
prm.SetObject(objects[i].object)
var err1, err2 error
te.ng.mtx.RLock()
// Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
if i%3 != 1 {
_, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
}
if i%3 != 2 {
_, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
}
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
}
var removedMtx sync.Mutex
var removed []deleteEvent
for _, shard := range te.shards {
id := *shard.id
shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
removedMtx.Lock()
removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
removedMtx.Unlock()
return common.DeleteRes{}, nil
}))
}
err := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
require.NoError(t, err)
require.Equal(t, copyCount, len(removed))
removedMask := make([]bool, len(objects))
loop:
for i := range removed {
for j := range objects {
if removed[i].addr == object.AddressOf(objects[j].object) {
require.Equal(t, objects[j].worstShard, removed[i].shardID,
"object %d was expected to be removed from another shard", j)
removedMask[j] = true
continue loop
}
}
require.FailNow(t, "unexpected object was removed", removed[i].addr)
}
for i := range objects {
if i%3 == 0 {
require.True(t, removedMask[i], "object %d was expected to be removed", i)
} else {
require.False(t, removedMask[i], "object %d was not expected to be removed", i)
}
}
}
func TestRebalanceSingleThread(t *testing.T) {
te := newEngineWithErrorThreshold(t, "", 0)
obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
signal := make(chan struct{}) // unblock rebalance
started := make(chan struct{}) // make sure rebalance is started
for _, shard := range te.shards {
shard.largeFileStorage.SetOption(teststore.WithDelete(func(common.DeletePrm) (common.DeleteRes, error) {
close(started)
<-signal
return common.DeleteRes{}, nil
}))
}
var firstErr error
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
firstErr = te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
}()
<-started
secondErr := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
require.ErrorIs(t, secondErr, errRemoveDuplicatesInProgress)
close(signal)
wg.Wait()
require.NoError(t, firstErr)
}
type deleteEvent struct {
shardID shard.ID
addr oid.Address
}
func TestRebalanceExitByContext(t *testing.T) {
te := newEngineWithErrorThreshold(t, "", 0)
objects := make([]*objectSDK.Object, 4)
for i := range objects {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
objects[i] = obj
}
for i := range objects {
var prm shard.PutPrm
prm.SetObject(objects[i])
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
}
var removed []deleteEvent
deleteCh := make(chan struct{})
signal := make(chan struct{})
for _, shard := range te.shards {
id := *shard.id
shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
deleteCh <- struct{}{}
<-signal
removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
return common.DeleteRes{}, nil
}))
}
ctx, cancel := context.WithCancel(context.Background())
var rebalanceErr error
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
rebalanceErr = te.ng.RemoveDuplicates(ctx, RemoveDuplicatesPrm{Concurrency: 1})
}()
const removeCount = 3
for i := 0; i < removeCount-1; i++ {
<-deleteCh
signal <- struct{}{}
}
<-deleteCh
cancel()
close(signal)
wg.Wait()
require.ErrorIs(t, rebalanceErr, context.Canceled)
require.Equal(t, removeCount, len(removed))
}

@ -208,16 +208,21 @@ func (e *StorageEngine) sortShardsByWeight(objAddr interface{ EncodeToString() s
e.mtx.RLock()
defer e.mtx.RUnlock()
h := hrw.Hash([]byte(objAddr.EncodeToString()))
shards := make([]hashedShard, 0, len(e.shards))
weights := make([]float64, 0, len(e.shards))
for _, sh := range e.shards {
shards = append(shards, hashedShard(sh))
weights = append(weights, e.shardWeight(sh.Shard))
}
return sortShardsByWeight(shards, h)
}
func sortShardsByWeight(shards []hashedShard, h uint64) []hashedShard {
weights := make([]float64, 0, len(shards))
for _, sh := range shards {
weights = append(weights, float64(sh.Shard.WeightValues().FreeSpace))
}
hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(objAddr.EncodeToString())))
hrw.SortHasherSliceByWeightValue(shards, weights, h)
return shards
}

@ -200,3 +200,21 @@ func (w *flushCacheResponseWrapper) FromGRPCMessage(m grpc.Message) error {
w.FlushCacheResponse = r
return nil
}
type doctorResponseWrapper struct {
*DoctorResponse
}
func (w *doctorResponseWrapper) ToGRPCMessage() grpc.Message {
return w.DoctorResponse
}
func (w *doctorResponseWrapper) FromGRPCMessage(m grpc.Message) error {
r, ok := m.(*DoctorResponse)
if !ok {
return message.NewUnexpectedMessageType(m, (*DoctorResponse)(nil))
}
w.DoctorResponse = r
return nil
}

@ -18,6 +18,7 @@ const (
rpcSynchronizeTree = "SynchronizeTree"
rpcEvacuateShard = "EvacuateShard"
rpcFlushCache = "FlushCache"
rpcDoctor = "Doctor"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@ -191,3 +192,16 @@ func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallO
return wResp.FlushCacheResponse, nil
}
// Doctor executes ControlService.Doctor RPC.
func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (*DoctorResponse, error) {
wResp := &doctorResponseWrapper{new(DoctorResponse)}
wReq := &requestWrapper{m: req}
err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDoctor), wReq, wResp, opts...)
if err != nil {
return nil, err
}
return wResp.DoctorResponse, nil
}

@ -0,0 +1,37 @@
package control
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
if !req.Body.RemoveDuplicates {
return nil, status.Error(codes.InvalidArgument, "operation not specified")
}
var prm engine.RemoveDuplicatesPrm
prm.Concurrency = int(req.Body.Concurrency)
err = s.s.RemoveDuplicates(ctx, prm)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}
err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}

File diff suppressed because it is too large.

@ -37,6 +37,9 @@ service ControlService {
// FlushCache moves all data from one shard to the others.
rpc FlushCache (FlushCacheRequest) returns (FlushCacheResponse);
// Doctor performs storage restructuring operations on engine.
rpc Doctor (DoctorRequest) returns (DoctorResponse);
}
// Health check request.
@ -345,3 +348,28 @@ message FlushCacheResponse {
Body body = 1;
Signature signature = 2;
}
// Doctor request.
message DoctorRequest {
// Request body structure.
message Body {
// Number of threads to use for the operation.
uint32 concurrency = 1;
// Flag to search engine for duplicate objects and leave only one copy.
bool remove_duplicates = 2;
}
Body body = 1;
Signature signature = 2;
}
// Doctor response.
message DoctorResponse {
// Response body structure.
message Body {
}
Body body = 1;
Signature signature = 2;
}

@ -1548,3 +1548,156 @@ func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
func (x *FlushCacheResponse) SetSignature(sig *Signature) {
x.Signature = sig
}
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DoctorRequest_Body) StableSize() (size int) {
size += proto.UInt32Size(1, x.Concurrency)
size += proto.BoolSize(2, x.RemoveDuplicates)
return size
}
// StableMarshal marshals x in protobuf binary format with stable field order.
//
// If buffer length is less than x.StableSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same binary format.
func (x *DoctorRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
return []byte{}
}
if buf == nil {
buf = make([]byte, x.StableSize())
}
var offset int
offset += proto.UInt32Marshal(1, buf[offset:], x.Concurrency)
offset += proto.BoolMarshal(2, buf[offset:], x.RemoveDuplicates)
return buf
}
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DoctorRequest) StableSize() (size int) {
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
// StableMarshal marshals x in protobuf binary format with stable field order.
//
// If buffer length is less than x.StableSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same binary format.
func (x *DoctorRequest) StableMarshal(buf []byte) []byte {
if x == nil {
return []byte{}
}
if buf == nil {
buf = make([]byte, x.StableSize())
}
var offset int
offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
return buf
}
// SignedDataSize returns the size of the request signed data in bytes.
//
// Structures with the same field values have the same signed data size.
func (x *DoctorRequest) SignedDataSize() int {
return x.GetBody().StableSize()
}
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same signed data.
func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) {
return x.GetBody().StableMarshal(buf), nil
}
func (x *DoctorRequest) SetSignature(sig *Signature) {
x.Signature = sig
}
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DoctorResponse_Body) StableSize() (size int) {
return size
}
// StableMarshal marshals x in protobuf binary format with stable field order.
//
// If buffer length is less than x.StableSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same binary format.
func (x *DoctorResponse_Body) StableMarshal(buf []byte) []byte {
return buf
}
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DoctorResponse) StableSize() (size int) {
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
// StableMarshal marshals x in protobuf binary format with stable field order.
//
// If buffer length is less than x.StableSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same binary format.
func (x *DoctorResponse) StableMarshal(buf []byte) []byte {
if x == nil {
return []byte{}
}
if buf == nil {
buf = make([]byte, x.StableSize())
}
var offset int
offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
return buf
}
// SignedDataSize returns the size of the response signed data in bytes.
//
// Structures with the same field values have the same signed data size.
func (x *DoctorResponse) SignedDataSize() int {
return x.GetBody().StableSize()
}
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
// Structures with the same field values have the same signed data.
func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) {
return x.GetBody().StableMarshal(buf), nil
}
func (x *DoctorResponse) SetSignature(sig *Signature) {
x.Signature = sig
}
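As a sanity check of the helpers above: `ReadSignedData` returns exactly the stable encoding of the request body, which is what gets signed by the CLI and verified by the control service. A minimal sketch using only the types shown in this diff (field values are arbitrary):

package main

import (
	"bytes"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	// StableMarshal(nil) allocates a buffer of exactly StableSize() bytes.
	body := &control.DoctorRequest_Body{
		Concurrency:      16,   // field 1, varint
		RemoveDuplicates: true, // field 2, bool
	}
	buf := body.StableMarshal(nil)
	fmt.Printf("encoded %d bytes, StableSize() = %d\n", len(buf), body.StableSize())

	// The signed data of the request is the stable encoding of its body.
	req := &control.DoctorRequest{Body: body}
	signed, _ := req.ReadSignedData(nil)
	fmt.Println("signed data equals body encoding:", bytes.Equal(buf, signed))
}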

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.21.12
// source: pkg/services/control/service.proto
@ -18,6 +18,20 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck"
ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus"
ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
ControlService_DumpShard_FullMethodName = "/control.ControlService/DumpShard"
ControlService_RestoreShard_FullMethodName = "/control.ControlService/RestoreShard"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor"
)
// ControlServiceClient is the client API for ControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@ -42,6 +56,8 @@ type ControlServiceClient interface {
EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error)
// Doctor performs storage restructuring operations on engine.
Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error)
}
type controlServiceClient struct {
@ -54,7 +70,7 @@ func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient {
func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/HealthCheck", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -63,7 +79,7 @@ func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckR
func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error) {
out := new(SetNetmapStatusResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/SetNetmapStatus", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_SetNetmapStatus_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -72,7 +88,7 @@ func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetma
func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) {
out := new(DropObjectsResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/DropObjects", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_DropObjects_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -81,7 +97,7 @@ func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsR
func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error) {
out := new(ListShardsResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/ListShards", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_ListShards_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -90,7 +106,7 @@ func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsReq
func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) {
out := new(SetShardModeResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/SetShardMode", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_SetShardMode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -99,7 +115,7 @@ func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardMod
func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error) {
out := new(DumpShardResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/DumpShard", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_DumpShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -108,7 +124,7 @@ func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardReque
func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error) {
out := new(RestoreShardResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/RestoreShard", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_RestoreShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -117,7 +133,7 @@ func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShar
func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) {
out := new(SynchronizeTreeResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/SynchronizeTree", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_SynchronizeTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -126,7 +142,7 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
out := new(EvacuateShardResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/EvacuateShard", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -135,7 +151,16 @@ func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateSh
func (c *controlServiceClient) FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error) {
out := new(FlushCacheResponse)
err := c.cc.Invoke(ctx, "/control.ControlService/FlushCache", in, out, opts...)
err := c.cc.Invoke(ctx, ControlService_FlushCache_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controlServiceClient) Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error) {
out := new(DoctorResponse)
err := c.cc.Invoke(ctx, ControlService_Doctor_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -166,6 +191,8 @@ type ControlServiceServer interface {
EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error)
// Doctor performs storage restructuring operations on engine.
Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@ -202,6 +229,9 @@ func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *Evacuat
func (UnimplementedControlServiceServer) FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FlushCache not implemented")
}
func (UnimplementedControlServiceServer) Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Doctor not implemented")
}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@ -224,7 +254,7 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/HealthCheck",
FullMethod: ControlService_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest))
@ -242,7 +272,7 @@ func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/SetNetmapStatus",
FullMethod: ControlService_SetNetmapStatus_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetNetmapStatus(ctx, req.(*SetNetmapStatusRequest))
@ -260,7 +290,7 @@ func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/DropObjects",
FullMethod: ControlService_DropObjects_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).DropObjects(ctx, req.(*DropObjectsRequest))
@ -278,7 +308,7 @@ func _ControlService_ListShards_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/ListShards",
FullMethod: ControlService_ListShards_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).ListShards(ctx, req.(*ListShardsRequest))
@ -296,7 +326,7 @@ func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/SetShardMode",
FullMethod: ControlService_SetShardMode_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetShardMode(ctx, req.(*SetShardModeRequest))
@ -314,7 +344,7 @@ func _ControlService_DumpShard_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/DumpShard",
FullMethod: ControlService_DumpShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).DumpShard(ctx, req.(*DumpShardRequest))
@ -332,7 +362,7 @@ func _ControlService_RestoreShard_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/RestoreShard",
FullMethod: ControlService_RestoreShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).RestoreShard(ctx, req.(*RestoreShardRequest))
@ -350,7 +380,7 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/SynchronizeTree",
FullMethod: ControlService_SynchronizeTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SynchronizeTree(ctx, req.(*SynchronizeTreeRequest))
@ -368,7 +398,7 @@ func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/EvacuateShard",
FullMethod: ControlService_EvacuateShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
@ -386,7 +416,7 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/control.ControlService/FlushCache",
FullMethod: ControlService_FlushCache_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).FlushCache(ctx, req.(*FlushCacheRequest))
@ -394,6 +424,24 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
func _ControlService_Doctor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DoctorRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControlServiceServer).Doctor(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: ControlService_Doctor_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).Doctor(ctx, req.(*DoctorRequest))
}
return interceptor(ctx, in, info, handler)
}
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@ -441,6 +489,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "FlushCache",
Handler: _ControlService_FlushCache_Handler,
},
{
MethodName: "Doctor",
Handler: _ControlService_Doctor_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
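Wiring the new method into a server follows the usual protoc-gen-go-grpc pattern: embed `UnimplementedControlServiceServer` and override `Doctor`. The sketch below is illustrative only; `RegisterControlServiceServer` and the body getters are assumed to come from the generated file whose diff is suppressed above, and the listener address and logging are made up.

package main

import (
	"context"
	"log"
	"net"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	"google.golang.org/grpc"
)

// doctorServer embeds the Unimplemented server for forward compatibility
// and overrides only the new Doctor method.
type doctorServer struct {
	control.UnimplementedControlServiceServer
}

func (doctorServer) Doctor(_ context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) {
	log.Printf("doctor: concurrency=%d, remove_duplicates=%v",
		req.GetBody().GetConcurrency(), req.GetBody().GetRemoveDuplicates())
	return &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	control.RegisterControlServiceServer(srv, doctorServer{})
	log.Fatal(srv.Serve(lis))
}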