Aleksey Savchuk
f0c43c8d80
All checks were successful
Vulncheck / Vulncheck (pull_request) Successful in 3m1s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m29s
Tests and linters / gopls check (pull_request) Successful in 3m50s
Tests and linters / Lint (pull_request) Successful in 4m35s
DCO action / DCO (pull_request) Successful in 5m12s
Tests and linters / Run gofumpt (pull_request) Successful in 5m33s
Build / Build Components (pull_request) Successful in 5m45s
Tests and linters / Tests with -race (pull_request) Successful in 6m37s
Tests and linters / Tests (pull_request) Successful in 7m17s
Tests and linters / Staticcheck (pull_request) Successful in 7m36s
Tests and linters / Run gofumpt (push) Successful in 1m22s
Tests and linters / Staticcheck (push) Successful in 3m19s
Tests and linters / Lint (push) Successful in 4m35s
Vulncheck / Vulncheck (push) Successful in 5m20s
Build / Build Components (push) Successful in 6m16s
Pre-commit hooks / Pre-commit (push) Successful in 6m37s
Tests and linters / Tests (push) Successful in 6m48s
Tests and linters / Tests with -race (push) Successful in 7m15s
Tests and linters / gopls check (push) Successful in 7m27s
Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like `zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`. Leave similar expressions with other messages unchanged, for example, `zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`. This change was made by applying the following patch: ```diff @@ var err expression @@ -zap.String("error", err.Error()) +zap.Error(err) @@ var err expression @@ -zap.String("err", err.Error()) +zap.Error(err) ``` Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
178 lines
4.8 KiB
Go
178 lines
4.8 KiB
Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"flag"
|
|
"fmt"
|
|
"log"
|
|
"os"
|
|
"sync"
|
|
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
|
"go.uber.org/zap"
|
|
)
|
|
|
|
const (
	// SuccessReturnCode is the process exit code used when the application
	// terminated normally (without panic), e.g. after printing the version.
	SuccessReturnCode = 0
)
|
|
|
|
// fatalOnErr prints err to the standard logger and terminates the process
// via os.Exit(1). A nil error is a no-op.
func fatalOnErr(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
|
|
|
|
// fatalOnErrDetails wraps err with the given details, prints it to the
// standard logger and terminates the process via os.Exit(1).
// A nil error is a no-op.
func fatalOnErrDetails(details string, err error) {
	if err == nil {
		return
	}
	log.Fatal(fmt.Errorf("%s: %w", details, err))
}
|
|
|
|
func main() {
|
|
configFile := flag.String("config", "", "path to config")
|
|
configDir := flag.String("config-dir", "", "path to config directory")
|
|
versionFlag := flag.Bool("version", false, "frostfs node version")
|
|
dryRunFlag := flag.Bool("check", false, "validate configuration and exit")
|
|
flag.Parse()
|
|
|
|
if *versionFlag {
|
|
fmt.Print(misc.BuildInfo("FrostFS Storage node"))
|
|
|
|
os.Exit(SuccessReturnCode)
|
|
}
|
|
|
|
appCfg := config.New(*configFile, *configDir, config.EnvPrefix)
|
|
|
|
err := validateConfig(appCfg)
|
|
fatalOnErr(err)
|
|
|
|
if *dryRunFlag {
|
|
return
|
|
}
|
|
|
|
c := initCfg(appCfg)
|
|
|
|
var ctx context.Context
|
|
ctx, c.ctxCancel = context.WithCancel(context.Background())
|
|
|
|
c.setHealthStatus(ctx, control.HealthStatus_STARTING)
|
|
|
|
initApp(ctx, c)
|
|
|
|
bootUp(ctx, c)
|
|
|
|
c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
|
|
|
|
wait(c)
|
|
}
|
|
|
|
func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
|
|
c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
|
|
initializer(c)
|
|
c.log.Info(ctx, name+" service has been successfully initialized")
|
|
}
|
|
|
|
// initApp wires up every node subsystem in dependency order. Each step is
// wrapped in initAndLog for uniform progress logging; failures inside the
// storage-engine and policy-engine steps are fatal via fatalOnErr.
// NOTE(review): the order of the initAndLog calls below is load-bearing —
// later services rely on earlier ones being initialized.
func initApp(ctx context.Context, c *cfg) {
	// Signal watcher runs for the whole application lifetime; the wait()
	// function later blocks on c.wg until it (and other workers) finish.
	c.wg.Add(1)
	go func() {
		c.signalWatcher(ctx)
		c.wg.Done()
	}()

	setRuntimeParameters(ctx, c)
	// Second return value (error or closer — not visible here) is
	// intentionally discarded by the original code.
	metrics, _ := metricsComponent(c)
	initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
	initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })

	initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })

	initLocalStorage(ctx, c)

	// Opening/initializing the local storage engine must succeed before any
	// service that persists data is started.
	initAndLog(ctx, c, "storage engine", func(c *cfg) {
		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
	})

	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
	initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })

	initAccessPolicyEngine(ctx, c)
	// Local override database of the access policy engine: Open takes the
	// context, Init does not (its signature differs from the storage engine's).
	initAndLog(ctx, c, "access policy engine", func(c *cfg) {
		fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
		fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
	})

	initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
	initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
	initAndLog(ctx, c, "session", initSessionService)
	initAndLog(ctx, c, "object", initObjectService)
	initAndLog(ctx, c, "tree", initTreeService)
	initAndLog(ctx, c, "apemanager", initAPEManagerService)
	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })

	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
|
|
|
|
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
|
|
c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
|
|
starter(ctx, c)
|
|
|
|
if logSuccess {
|
|
c.log.Info(ctx, name+" service started successfully")
|
|
}
|
|
}
|
|
|
|
func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
|
|
c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
|
|
|
|
err := stopper(ctx)
|
|
if err != nil {
|
|
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
|
|
zap.Error(err),
|
|
)
|
|
}
|
|
|
|
c.log.Debug(ctx, name+" service has been stopped")
|
|
}
|
|
|
|
// bootUp starts the node's runtime activity after initApp has wired
// everything up: gRPC serving, notary deposit, network-map bootstrap and
// background workers — in that order.
func bootUp(ctx context.Context, c *cfg) {
	// NOTE(review): the closure deliberately ignores the context passed by
	// runAndLog and closes over the outer ctx instead (same value here).
	// gRPC start is not awaited, hence logSuccess=false.
	runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
	runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)

	bootstrapNode(ctx, c)
	startWorkers(ctx, c)
}
|
|
|
|
func wait(c *cfg) {
|
|
c.log.Info(context.Background(), logs.CommonApplicationStarted,
|
|
zap.String("version", misc.Version))
|
|
|
|
<-c.done // graceful shutdown
|
|
|
|
drain := &sync.WaitGroup{}
|
|
drain.Add(1)
|
|
go func() {
|
|
defer drain.Done()
|
|
for err := range c.internalErr {
|
|
c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
|
|
zap.String("message", err.Error()))
|
|
}
|
|
}()
|
|
|
|
c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
|
|
|
|
c.wg.Wait()
|
|
|
|
close(c.internalErr)
|
|
drain.Wait()
|
|
}
|
|
|
|
func (c *cfg) onShutdown(f func()) {
|
|
c.closers = append(c.closers, closer{"", f})
|
|
}
|