package engine

import (
	"context"
	"errors"
	"fmt"
	"path/filepath"
	"strings"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"go.uber.org/zap"
)
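
// shardInitError pairs a shard ID with the error returned by its Open or Init
// call; failures from concurrently started shards are collected over a channel
// of these values.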
type shardInitError struct {
	err error
	id  string
}

// Open opens all StorageEngine's components.
func (e *StorageEngine) Open() error {
	return e.open()
}
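
// open opens all shards in parallel; a shard that fails to open is logged,
// closed and removed from the engine instead of failing the whole call.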
func (e *StorageEngine) open() error {
	e.mtx.Lock()
	defer e.mtx.Unlock()

	var wg sync.WaitGroup
	var errCh = make(chan shardInitError, len(e.shards))

	for id, sh := range e.shards {
		wg.Add(1)
		go func(id string, sh *shard.Shard) {
			defer wg.Done()
			if err := sh.Open(); err != nil {
				errCh <- shardInitError{
					err: err,
					id:  id,
				}
			}
		}(id, sh.Shard)
	}
	wg.Wait()
	close(errCh)

	for res := range errCh {
		if res.err != nil {
			e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
				zap.String("id", res.id),
				zap.Error(res.err))

			sh := e.shards[res.id]
			delete(e.shards, res.id)

			err := sh.Close()
			if err != nil {
				e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
					zap.String("id", res.id),
					zap.Error(err))
			}

			continue
		}
	}

	return nil
}

// Init initializes all StorageEngine's components.
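// Shards are initialized in parallel; a shard failing with
// blobstor.ErrInitBlobovniczas is closed, logged and skipped, while any other
// shard error aborts initialization.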
func (e *StorageEngine) Init(ctx context.Context) error {
	e.mtx.Lock()
	defer e.mtx.Unlock()

	var wg sync.WaitGroup
	var errCh = make(chan shardInitError, len(e.shards))

	for id, sh := range e.shards {
		wg.Add(1)
		go func(id string, sh *shard.Shard) {
			defer wg.Done()
			if err := sh.Init(ctx); err != nil {
				errCh <- shardInitError{
					err: err,
					id:  id,
				}
			}
		}(id, sh.Shard)
	}
	wg.Wait()
	close(errCh)

	for res := range errCh {
		if res.err != nil {
			if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
				e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
					zap.String("id", res.id),
					zap.Error(res.err))

				sh := e.shards[res.id]
				delete(e.shards, res.id)

				err := sh.Close()
				if err != nil {
					e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
						zap.String("id", res.id),
						zap.Error(err))
				}

				continue
			}
			return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
		}
	}

	if len(e.shards) == 0 {
		return errors.New("failed initialization on all shards")
	}

	e.wg.Add(1)
	go e.setModeLoop()

	return nil
}

var errClosed = errors.New("storage engine is closed")

// Close releases all StorageEngine's components. Waits for all data-related operations to complete.
// After this call, all subsequent operations will fail.
//
// The method MUST only be called when the application exits.
func (e *StorageEngine) Close() error {
	close(e.closeCh)
	defer e.wg.Wait()
	return e.setBlockExecErr(errClosed)
}

// closes all shards. Never returns an error; shard close errors are only logged.
func (e *StorageEngine) close(releasePools bool) error {
	e.mtx.RLock()
	defer e.mtx.RUnlock()

	if releasePools {
		for _, p := range e.shardPools {
			p.Release()
		}
	}

	for id, sh := range e.shards {
		if err := sh.Close(); err != nil {
			e.log.Debug(logs.EngineCouldNotCloseShard,
				zap.String("id", id),
				zap.String("error", err.Error()),
			)
		}
	}

	return nil
}

// executes op if execution is not blocked, otherwise returns blocking error.
//
// Can be called concurrently with setBlockExecErr.
func (e *StorageEngine) execIfNotBlocked(op func() error) error {
	e.blockExec.mtx.RLock()
	defer e.blockExec.mtx.RUnlock()

	if e.blockExec.err != nil {
		return e.blockExec.err
	}

	return op()
}

// sets the flag of blocking execution of all data operations according to err:
//   - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
//     (if err == errClosed => additionally releases pools and does not allow to resume executions).
//   - otherwise, resumes execution. If exec was blocked, calls open method.
//
// Can be called concurrently with exec. In this case it waits for all executions to complete.
func (e *StorageEngine) setBlockExecErr(err error) error {
	e.blockExec.mtx.Lock()
	defer e.blockExec.mtx.Unlock()

	prevErr := e.blockExec.err

	wasClosed := errors.Is(prevErr, errClosed)
	if wasClosed {
		return errClosed
	}

	e.blockExec.err = err

	if err == nil {
		if prevErr != nil { // block -> ok
			return e.open()
		}
	} else if prevErr == nil { // ok -> block
		return e.close(errors.Is(err, errClosed))
	}

	// otherwise do nothing

	return nil
}

// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
// To resume the execution, use ResumeExecution method.
//
// Can be called regardless of whether execution was previously blocked. If execution wasn't blocked, releases all
// resources similar to Close. Can be called concurrently with Close and any data-related method (waits for all
// executions to complete). Returns an error if Close has been called before.
//
// Must not be called concurrently with either Open or Init.
//
// Note: technically passing a nil error will resume the execution, but it is recommended to call ResumeExecution
// for this.
func (e *StorageEngine) BlockExecution(err error) error {
	return e.setBlockExecErr(err)
}

// ResumeExecution resumes the execution of any data-related operation.
// To block the execution, use BlockExecution method.
//
// Can be called regardless of whether execution was previously blocked. If execution was blocked, prepares all
// resources similar to Open. Can be called concurrently with Close and any data-related method (waits for all
// executions to complete). Returns an error if Close has been called before.
//
// Must not be called concurrently with either Open or Init.
func (e *StorageEngine) ResumeExecution() error {
	return e.setBlockExecErr(nil)
}
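
// ReConfiguration describes the engine configuration applied by Reload: the
// error threshold after which a shard moves to read-only mode, the size of the
// per-shard worker pool, and shard options keyed by shard config identifier.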
type ReConfiguration struct {
	errorsThreshold uint32
	shardPoolSize   uint32

	shards map[string][]shard.Option // meta path -> shard opts
}

// SetErrorsThreshold sets the number of errors after which
// a shard is moved to read-only mode.
func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
	rCfg.errorsThreshold = errorsThreshold
}

// SetShardPoolSize sets the size of the worker pool for each shard.
func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
	rCfg.shardPoolSize = shardPoolSize
}

// AddShard adds a shard for the reconfiguration.
// The shard identifier is calculated from the paths used in its blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
	if rCfg.shards == nil {
		rCfg.shards = make(map[string][]shard.Option)
	}

	if _, found := rCfg.shards[id]; found {
		return
	}

	rCfg.shards[id] = opts
}

// Reload reloads StorageEngine's configuration at runtime.
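// Shards missing from the new configuration are removed, shards present in
// both are reloaded with the new options, and new shards are created, opened,
// initialized and added to the engine.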
func (e *StorageEngine) Reload(ctx context.Context, rcfg ReConfiguration) error {
	type reloadInfo struct {
		sh   *shard.Shard
		opts []shard.Option
	}

	e.mtx.RLock()

	var shardsToRemove []string // shards IDs
	var shardsToAdd []string    // shard config identifiers (blobstor paths concatenation)
	var shardsToReload []reloadInfo

	// mark shards that are missing in the new configuration for removal
	for id, sh := range e.shards {
		_, ok := rcfg.shards[calculateShardID(sh.DumpInfo())]
		if !ok {
			shardsToRemove = append(shardsToRemove, id)
		}
	}
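
	// classify the remaining configured shards: existing ones are reloaded
	// with the new options, unknown identifiers become new shards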
loop:
	for newID := range rcfg.shards {
		for _, sh := range e.shards {
			// This calculation should be kept in sync with node
			// configuration parsing during SIGHUP.
			if newID == calculateShardID(sh.DumpInfo()) {
				shardsToReload = append(shardsToReload, reloadInfo{
					sh:   sh.Shard,
					opts: rcfg.shards[newID],
				})
				continue loop
			}
		}

		shardsToAdd = append(shardsToAdd, newID)
	}

	e.mtx.RUnlock()

	e.removeShards(shardsToRemove...)

	for _, p := range shardsToReload {
		err := p.sh.Reload(p.opts...)
		if err != nil {
			e.log.Error(logs.EngineCouldNotReloadAShard,
				zap.Stringer("shard id", p.sh.ID()),
				zap.Error(err))
		}
	}

	for _, newID := range shardsToAdd {
		sh, err := e.createShard(rcfg.shards[newID])
		if err != nil {
			return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
		}

		idStr := sh.ID().String()

		err = sh.Open()
		if err == nil {
			err = sh.Init(ctx)
		}
		if err != nil {
			_ = sh.Close()
			return fmt.Errorf("could not init %s shard: %w", idStr, err)
		}

		err = e.addShard(sh)
		if err != nil {
			_ = sh.Close()
			return fmt.Errorf("could not add %s shard: %w", idStr, err)
		}

		e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
	}

	return nil
}
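
// calculateShardID builds a shard config identifier by concatenating the
// cleaned paths of all blobstor sub-storages.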
func calculateShardID(info shard.Info) string {
	// This calculation should be kept in sync with node
	// configuration parsing during SIGHUP.
	var sb strings.Builder
	for _, sub := range info.BlobStorInfo.SubStorages {
		sb.WriteString(filepath.Clean(sub.Path))
	}
	return sb.String()
}