[#168] node: Refactor node config

Resolve the containedctx linter warning for cfg

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
refactoring/OBJECT-3610_node
Dmitrii Stepanov 2023-03-23 17:59:14 +03:00
parent 8426d25f4b
commit a7c79c773a
20 changed files with 93 additions and 83 deletions
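The containedctx linter rejects structs that store a context.Context. Below is a minimal sketch of the pattern this commit applies, with illustrative names (app, run, shutdown are not the node's real types): the context becomes an explicit parameter, and a done channel replaces the stored ctx/ctxCancel pair for shutdown signalling.

package main

import (
	"context"
	"fmt"
	"time"
)

// Before (flagged by containedctx):
//
//	type internals struct {
//		ctx       context.Context
//		ctxCancel func()
//	}
//
// After: no context in the struct; a done channel signals shutdown and the
// context is passed to every method that needs it.
type app struct {
	done chan struct{}
}

// run takes the context as an explicit parameter instead of reading a field.
func (a *app) run(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("context cancelled:", ctx.Err())
	case <-a.done:
		fmt.Println("shutdown requested")
	}
}

// shutdown signals termination without holding a stored cancel function.
func (a *app) shutdown() {
	a.done <- struct{}{}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	a := &app{done: make(chan struct{})}
	go func() {
		time.Sleep(100 * time.Millisecond)
		a.shutdown()
	}()
	a.run(ctx) // blocks until shutdown or cancellation
}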

View File

@@ -303,10 +303,8 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 // the application life cycle.
 // It should not contain any read configuration values, component-specific
 // helpers and fields.
-// nolint: containedctx
 type internals struct {
-	ctx         context.Context
-	ctxCancel   func()
+	done        chan struct{}
 	internalErr chan error // channel for internal application errors at runtime
 	appCfg      *config.Config
@@ -570,7 +568,7 @@ func initCfg(appCfg *config.Config) *cfg {
 	fatalOnErr(err)
 	c.internals = internals{
-		ctx:         context.Background(),
+		done:        make(chan struct{}),
 		appCfg:      appCfg,
 		internalErr: make(chan error),
 		log:         log,
@@ -940,7 +938,7 @@ type dCmp struct {
 	reloadFunc func() error
 }
-func (c *cfg) signalWatcher() {
+func (c *cfg) signalWatcher(ctx context.Context) {
 	ch := make(chan os.Signal, 1)
 	signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
@@ -949,7 +947,7 @@ func (c *cfg) signalWatcher() {
 		case sig := <-ch:
 			switch sig {
 			case syscall.SIGHUP:
-				c.reloadConfig()
+				c.reloadConfig(ctx)
 			case syscall.SIGTERM, syscall.SIGINT:
 				c.log.Info("termination signal has been received, stopping...")
 				// TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
@@ -971,7 +969,7 @@ func (c *cfg) signalWatcher() {
 	}
 }
-func (c *cfg) reloadConfig() {
+func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info("SIGHUP has been received, rereading configuration...")
 	err := c.readConfig(c.appCfg)
@@ -999,10 +997,10 @@ func (c *cfg) reloadConfig() {
 		} else {
 			cmp.preReload = disableMetricsSvc
 		}
-		components = append(components, dCmp{cmp.name, cmp.reload})
+		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
 	}
 	if cmp, updated := pprofComponent(c); updated {
-		components = append(components, dCmp{cmp.name, cmp.reload})
+		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
 	}
 	// Storage Engine
@@ -1012,7 +1010,7 @@ func (c *cfg) reloadConfig() {
 		rcfg.AddShard(optsWithID.configID, optsWithID.shOpts)
 	}
-	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(rcfg)
+	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
 	if err != nil {
 		c.log.Error("storage engine configuration update", zap.Error(err))
 		return
@@ -1033,7 +1031,7 @@ func (c *cfg) reloadConfig() {
 func (c *cfg) shutdown() {
 	c.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
-	c.ctxCancel()
+	c.done <- struct{}{}
 	for i := range c.closers {
 		c.closers[len(c.closers)-1-i].fn()
 	}
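The dCmp entries above keep a zero-argument reload function even though component reload now takes a context; the diff adapts the new signature by capturing ctx in a closure. A self-contained sketch of that adaptation, with hypothetical stand-ins (component, dCmpSketch) for the real httpComponent and dCmp:

package main

import (
	"context"
	"fmt"
)

// dCmpSketch mirrors the dCmp shape: reloadFunc stays func() error, so the
// loop that runs the reloads does not need to change.
type dCmpSketch struct {
	name       string
	reloadFunc func() error
}

// component stands in for httpComponent; its reload now requires a context.
type component struct{ name string }

func (c *component) reload(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		fmt.Println("reloading", c.name)
		return nil
	}
}

func main() {
	ctx := context.Background()
	cmp := &component{name: "pprof"}

	// The closure captures ctx, adapting reload(ctx) to the zero-argument field.
	d := dCmpSketch{name: cmp.name, reloadFunc: func() error { return cmp.reload(ctx) }}

	if err := d.reloadFunc(); err != nil {
		fmt.Println("reload failed:", err)
	}
}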

View File

@@ -65,7 +65,7 @@ func initControlService(c *cfg) {
 	control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
 	c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
-		runAndLog(c, "control", false, func(c *cfg) {
+		runAndLog(ctx, c, "control", false, func(context.Context, *cfg) {
 			fatalOnErr(c.cfgControlService.server.Serve(lis))
 		})
 	}))

View File

@@ -41,14 +41,14 @@ func (cmp *httpComponent) init(c *cfg) {
 	c.workers = append(c.workers, worker{
 		cmp.name,
 		func(ctx context.Context) {
-			runAndLog(c, cmp.name, false, func(c *cfg) {
+			runAndLog(ctx, c, cmp.name, false, func(context.Context, *cfg) {
 				fatalOnErr(srv.Serve())
 			})
 		},
 	})
 }
-func (cmp *httpComponent) reload() error {
+func (cmp *httpComponent) reload(ctx context.Context) error {
 	if cmp.preReload != nil {
 		cmp.preReload(cmp.cfg)
 	}
@@ -64,7 +64,7 @@ func (cmp *httpComponent) reload() error {
 	cmp.init(cmp.cfg)
 	// Start worker
 	if cmp.enabled {
-		startWorker(cmp.cfg, *getWorker(cmp.cfg, cmp.name))
+		startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
 	}
 	return nil
 }

View File

@@ -56,15 +56,17 @@ func main() {
 	c := initCfg(appCfg)
-	initApp(c)
+	ctx, cancel := context.WithCancel(context.Background())
+	initApp(ctx, c)
 	c.setHealthStatus(control.HealthStatus_STARTING)
-	bootUp(c)
+	bootUp(ctx, c)
 	c.setHealthStatus(control.HealthStatus_READY)
-	wait(c)
+	wait(c, cancel)
 }
 func initAndLog(c *cfg, name string, initializer func(*cfg)) {
@@ -73,12 +75,10 @@ func initAndLog(c *cfg, name string, initializer func(*cfg)) {
 	c.log.Info(fmt.Sprintf("%s service has been successfully initialized", name))
 }
-func initApp(c *cfg) {
-	c.ctx, c.ctxCancel = context.WithCancel(context.Background())
+func initApp(ctx context.Context, c *cfg) {
 	c.wg.Add(1)
 	go func() {
-		c.signalWatcher()
+		c.signalWatcher(ctx)
 		c.wg.Done()
 	}()
@@ -91,7 +91,7 @@ func initApp(c *cfg) {
 	initAndLog(c, "storage engine", func(c *cfg) {
 		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open())
-		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init())
+		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
 	})
 	initAndLog(c, "gRPC", initGRPC)
@@ -105,12 +105,12 @@ func initApp(c *cfg) {
 	initAndLog(c, "tree", initTreeService)
 	initAndLog(c, "control", initControlService)
-	initAndLog(c, "morph notifications", listenMorphNotifications)
+	initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }
-func runAndLog(c *cfg, name string, logSuccess bool, starter func(*cfg)) {
+func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
 	c.log.Info(fmt.Sprintf("starting %s service...", name))
-	starter(c)
+	starter(ctx, c)
 	if logSuccess {
 		c.log.Info(fmt.Sprintf("%s service started successfully", name))
@@ -130,20 +130,22 @@ func stopAndLog(c *cfg, name string, stopper func() error) {
 	c.log.Debug(fmt.Sprintf("%s service has been stopped", name))
 }
-func bootUp(c *cfg) {
-	runAndLog(c, "NATS", true, connectNats)
-	runAndLog(c, "gRPC", false, serveGRPC)
-	runAndLog(c, "notary", true, makeAndWaitNotaryDeposit)
+func bootUp(ctx context.Context, c *cfg) {
+	runAndLog(ctx, c, "NATS", true, connectNats)
+	runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
+	runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
 	bootstrapNode(c)
-	startWorkers(c)
+	startWorkers(ctx, c)
 }
-func wait(c *cfg) {
+func wait(c *cfg, cancel func()) {
 	c.log.Info("application started",
 		zap.String("version", misc.Version))
-	<-c.ctx.Done() // graceful shutdown
+	<-c.done // graceful shutdown
+	cancel()
 	c.log.Debug("waiting for all processes to stop")

View File

@@ -108,7 +108,7 @@ func initMorphComponents(c *cfg) {
 	c.cfgNetmap.wrapper = wrap
 }
-func makeAndWaitNotaryDeposit(c *cfg) {
+func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
 	// skip notary deposit in non-notary environments
 	if !c.cfgMorph.notaryEnabled {
 		return
@@ -125,7 +125,7 @@ func makeAndWaitNotaryDeposit(c *cfg) {
 		return
 	}
-	err = waitNotaryDeposit(c, tx)
+	err = waitNotaryDeposit(ctx, c, tx)
 	fatalOnErr(err)
 }
@@ -154,11 +154,11 @@ var (
 	errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
 )
-func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
+func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
 	for i := 0; i < notaryDepositRetriesAmount; i++ {
 		select {
-		case <-c.ctx.Done():
-			return c.ctx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
 		default:
 		}
@@ -171,7 +171,7 @@ func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
 			return errNotaryDepositFail
 		}
-		err = c.cfgMorph.client.Wait(c.ctx, 1)
+		err = c.cfgMorph.client.Wait(ctx, 1)
 		if err != nil {
 			return fmt.Errorf("could not wait for one block in chain: %w", err)
 		}
@@ -180,7 +180,7 @@ func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
 	return errNotaryDepositTimeout
 }
-func listenMorphNotifications(c *cfg) {
+func listenMorphNotifications(ctx context.Context, c *cfg) {
 	// listenerPoolCap is a capacity of a
 	// worker pool inside the listener. It
 	// is used to prevent blocking in neo-go:
@@ -200,7 +200,7 @@ func listenMorphNotifications(c *cfg) {
 		c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
 	}
-	subs, err = subscriber.New(c.ctx, &subscriber.Params{
+	subs, err = subscriber.New(ctx, &subscriber.Params{
 		Log:            c.log,
 		StartFromBlock: fromSideChainBlock,
 		Client:         c.cfgMorph.client,
@@ -214,9 +214,9 @@ func listenMorphNotifications(c *cfg) {
 	})
 	fatalOnErr(err)
-	c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
-		runAndLog(c, "morph notification", false, func(c *cfg) {
-			lis.ListenWithError(ctx, c.internalErr)
+	c.workers = append(c.workers, newWorkerFromFunc(func(wCtx context.Context) {
+		runAndLog(wCtx, c, "morph notification", false, func(lCtx context.Context, c *cfg) {
+			lis.ListenWithError(lCtx, c.internalErr)
 		})
 	}))

View File

@@ -1,6 +1,7 @@
 package main
 import (
+	"context"
 	"encoding/hex"
 	"fmt"
@@ -155,13 +156,13 @@ func initNotifications(c *cfg) {
 	}
 }
-func connectNats(c *cfg) {
+func connectNats(ctx context.Context, c *cfg) {
 	if !c.cfgNotifications.enabled {
 		return
 	}
 	endpoint := nodeconfig.Notification(c.appCfg).Endpoint()
-	err := c.cfgNotifications.nw.w.Connect(c.ctx, endpoint)
+	err := c.cfgNotifications.nw.w.Connect(ctx, endpoint)
 	if err != nil {
 		panic(fmt.Sprintf("could not connect to a nats endpoint %s: %v", endpoint, err))
 	}

View File

@@ -15,17 +15,17 @@ func newWorkerFromFunc(fn func(ctx context.Context)) worker {
 	}
 }
-func startWorkers(c *cfg) {
+func startWorkers(ctx context.Context, c *cfg) {
 	for _, wrk := range c.workers {
-		startWorker(c, wrk)
+		startWorker(ctx, c, wrk)
 	}
 }
-func startWorker(c *cfg, wrk worker) {
+func startWorker(ctx context.Context, c *cfg, wrk worker) {
 	c.wg.Add(1)
 	go func(w worker) {
-		w.fn(c.ctx)
+		w.fn(ctx)
 		c.wg.Done()
 	}(wrk)
 }

View File

@@ -1,6 +1,7 @@
 package engine
 import (
+	"context"
 	"errors"
 	"fmt"
 	"path/filepath"
@@ -68,7 +69,7 @@ func (e *StorageEngine) open() error {
 }
 // Init initializes all StorageEngine's components.
-func (e *StorageEngine) Init() error {
+func (e *StorageEngine) Init(ctx context.Context) error {
 	e.mtx.Lock()
 	defer e.mtx.Unlock()
@@ -79,7 +80,7 @@
 		wg.Add(1)
 		go func(id string, sh *shard.Shard) {
 			defer wg.Done()
-			if err := sh.Init(); err != nil {
+			if err := sh.Init(ctx); err != nil {
 				errCh <- shardInitError{
 					err: err,
 					id:  id,
@@ -264,7 +265,7 @@ func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
 }
 // Reload reloads StorageEngine's configuration in runtime.
-func (e *StorageEngine) Reload(rcfg ReConfiguration) error {
+func (e *StorageEngine) Reload(ctx context.Context, rcfg ReConfiguration) error {
 	type reloadInfo struct {
 		sh   *shard.Shard
 		opts []shard.Option
@@ -324,7 +325,7 @@ loop:
 			err = sh.Open()
 			if err == nil {
-				err = sh.Init()
+				err = sh.Init(ctx)
 			}
 			if err != nil {
 				_ = sh.Close()

View File

@@ -1,6 +1,7 @@
 package engine
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io/fs"
@@ -169,7 +170,7 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 	err = e.Open()
 	if err == nil {
-		require.Error(t, e.Init())
+		require.Error(t, e.Init(context.Background()))
 	}
 }
@@ -180,7 +181,7 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 	beforeReload()
-	require.NoError(t, e.Reload(ReConfiguration{
+	require.NoError(t, e.Reload(context.Background(), ReConfiguration{
 		shards: map[string][]shard.Option{configID: opts},
 	}))
@@ -273,7 +274,7 @@ func TestReload(t *testing.T) {
 		}
 		rcfg.AddShard(currShards[0], nil) // same path
-		require.NoError(t, e.Reload(rcfg))
+		require.NoError(t, e.Reload(context.Background(), rcfg))
 		// no new paths => no new shards
 		require.Equal(t, shardNum, len(e.shards))
@@ -286,7 +287,7 @@ func TestReload(t *testing.T) {
 			meta.WithPath(newMeta),
 			meta.WithEpochState(epochState{}),
 		)})
-		require.NoError(t, e.Reload(rcfg))
+		require.NoError(t, e.Reload(context.Background(), rcfg))
 		require.Equal(t, shardNum+1, len(e.shards))
 		require.Equal(t, shardNum+1, len(e.shardPools))
@@ -303,7 +304,7 @@ func TestReload(t *testing.T) {
 			rcfg.AddShard(currShards[i], nil)
 		}
-		require.NoError(t, e.Reload(rcfg))
+		require.NoError(t, e.Reload(context.Background(), rcfg))
 		// removed one
 		require.Equal(t, shardNum-1, len(e.shards))
@@ -339,7 +340,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
 	require.Equal(t, num, len(e.shardPools))
 	require.NoError(t, e.Open())
-	require.NoError(t, e.Init())
+	require.NoError(t, e.Init(context.Background()))
 	return e, currShards
 }

View File

@@ -1,6 +1,7 @@
 package engine
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -159,7 +160,7 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
 	))
 	require.NoError(t, s.Open())
-	require.NoError(t, s.Init())
+	require.NoError(t, s.Init(context.Background()))
 	return s
 }
@@ -185,7 +186,7 @@ func testEngineFromShardOpts(t *testing.T, num int, extraOpts []shard.Option) *S
 	}
 	require.NoError(t, engine.Open())
-	require.NoError(t, engine.Init())
+	require.NoError(t, engine.Init(context.Background()))
 	return engine
 }

View File

@@ -1,6 +1,7 @@
 package engine
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -76,7 +77,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 		}
 	}
 	require.NoError(t, e.Open())
-	require.NoError(t, e.Init())
+	require.NoError(t, e.Init(context.Background()))
 	return &testEngine{
 		ng: e,

View File

@@ -1,6 +1,7 @@
 package engine
 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -51,7 +52,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 		require.NoError(t, err)
 	}
 	require.NoError(t, e.Open())
-	require.NoError(t, e.Init())
+	require.NoError(t, e.Init(context.Background()))
 	objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))

View File

@@ -1,6 +1,7 @@
 package shard
 import (
+	"context"
 	"errors"
 	"fmt"
@@ -82,7 +83,7 @@ func (x *metabaseSynchronizer) Init() error {
 }
 // Init initializes all Shard's components.
-func (s *Shard) Init() error {
+func (s *Shard) Init(ctx context.Context) error {
 	type initializer interface {
 		Init() error
 	}
@@ -151,7 +152,7 @@ func (s *Shard) Init() error {
 		},
 	}
-	s.gc.init()
+	s.gc.init(ctx)
 	return nil
 }

View File

@@ -1,6 +1,7 @@
 package shard
 import (
+	"context"
 	"io/fs"
 	"math"
 	"os"
@@ -83,7 +84,7 @@ func TestShardOpen(t *testing.T) {
 	sh := newShard()
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	require.Equal(t, mode.ReadWrite, sh.GetMode())
 	require.NoError(t, sh.Close())
@@ -92,7 +93,7 @@ func TestShardOpen(t *testing.T) {
 	sh = newShard()
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	require.Equal(t, mode.ReadOnly, sh.GetMode())
 	require.Error(t, sh.SetMode(mode.ReadWrite))
 	require.Equal(t, mode.ReadOnly, sh.GetMode())
@@ -103,7 +104,7 @@ func TestShardOpen(t *testing.T) {
 	sh = newShard()
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
 	require.NoError(t, sh.Close())
 }
@@ -128,7 +129,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
 		WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
 		WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	obj := objecttest.Object()
 	obj.SetType(objectSDK.TypeRegular)
@@ -150,7 +151,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
 		WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta_new")), meta.WithEpochState(epochState{})),
 		WithRefillMetabase(true))
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	var getPrm GetPrm
 	getPrm.SetAddress(addr)
@@ -188,7 +189,7 @@ func TestRefillMetabase(t *testing.T) {
 	require.NoError(t, sh.Open())
 	// initialize Blobstor
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	const objNum = 5
@@ -355,7 +356,7 @@ func TestRefillMetabase(t *testing.T) {
 	require.NoError(t, sh.Open())
 	// initialize Blobstor
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	defer sh.Close()

View File

@@ -102,7 +102,7 @@ func defaultGCCfg() gcCfg {
 	}
 }
-func (gc *gc) init() {
+func (gc *gc) init(ctx context.Context) {
 	sz := 0
 	for _, v := range gc.mEventHandler {
@@ -115,10 +115,10 @@ func (gc *gc) init() {
 	gc.wg.Add(2)
 	go gc.tickRemover()
-	go gc.listenEvents()
+	go gc.listenEvents(ctx)
 }
-func (gc *gc) listenEvents() {
+func (gc *gc) listenEvents(ctx context.Context) {
 	defer gc.wg.Done()
 	for {
@@ -136,8 +136,7 @@ func (gc *gc) listenEvents() {
 			v.cancelFunc()
 			v.prevGroup.Wait()
-			var ctx context.Context
-			ctx, v.cancelFunc = context.WithCancel(context.Background())
+			ctx, v.cancelFunc = context.WithCancel(ctx)
 			v.prevGroup.Add(len(v.handlers))
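In the garbage collector above, deriving the per-event handler context from the listener's ctx (rather than context.Background()) lets in-flight handlers stop when the parent context is cancelled at shutdown. A compact, self-contained sketch of that idea under hypothetical names (listen, events, handler durations):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// listen models the event loop: each new event cancels the previous event's
// handlers and starts new ones under a context derived from the parent ctx.
func listen(ctx context.Context, events <-chan string) {
	var (
		prev       sync.WaitGroup
		cancelPrev context.CancelFunc = func() {}
	)
	for {
		select {
		case <-ctx.Done():
			cancelPrev()
			prev.Wait()
			return
		case ev := <-events:
			// Cancel handlers of the previous event and wait for them to finish.
			cancelPrev()
			prev.Wait()

			// Derive the handler context from the parent, not from Background(),
			// so cancelling ctx also cancels the handlers.
			var hctx context.Context
			hctx, cancelPrev = context.WithCancel(ctx)

			prev.Add(1)
			go func(ev string) {
				defer prev.Done()
				select {
				case <-hctx.Done():
					fmt.Println("handler cancelled for", ev)
				case <-time.After(10 * time.Millisecond):
					fmt.Println("handled", ev)
				}
			}(ev)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	events := make(chan string)
	go listen(ctx, events)

	events <- "new epoch"
	time.Sleep(50 * time.Millisecond)
	cancel() // shutdown: the listener returns and pending handlers are cancelled
	time.Sleep(20 * time.Millisecond)
}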

View File

@@ -72,7 +72,7 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
 	sh = shard.New(opts...)
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	t.Cleanup(func() {
 		releaseShard(sh, t)

View File

@@ -56,7 +56,7 @@ func TestShard_Lock(t *testing.T) {
 	sh = shard.New(opts...)
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	t.Cleanup(func() {
 		releaseShard(sh, t)

View File

@@ -1,6 +1,7 @@
 package shard_test
 import (
+	"context"
 	"path/filepath"
 	"testing"
@@ -215,7 +216,7 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
 		shard.WithMetricsWriter(mm),
 	)
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	t.Cleanup(func() {
 		sh.Close()

View File

@@ -1,6 +1,7 @@
 package shard
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"testing"
@@ -51,7 +52,7 @@ func TestShardReload(t *testing.T) {
 	sh := New(opts...)
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	objects := make([]objAddr, 5)
 	for i := range objects {

View File

@@ -1,6 +1,7 @@
 package shard_test
 import (
+	"context"
 	"path/filepath"
 	"testing"
@@ -80,7 +81,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
 	sh := shard.New(opts...)
 	require.NoError(t, sh.Open())
-	require.NoError(t, sh.Init())
+	require.NoError(t, sh.Init(context.Background()))
 	return sh
 }