package server

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/nspcc-dev/neo-go/cli/options"
	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/core"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/network"
	"github.com/nspcc-dev/neo-go/pkg/network/metrics"
	"github.com/nspcc-dev/neo-go/pkg/rpc/server"
	"github.com/urfave/cli"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// NewCommands returns 'node' and 'db' commands.
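//
// Rough usage sketch (the binary name and any network selection flags come from
// the surrounding app and options.Network, so treat these as examples only):
//
//	neo-go node --config-path ./config
//	neo-go db dump -o chain.acc -c 100
//	neo-go db restore -i chain.acc -s 0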
func NewCommands() []cli.Command {
	var cfgFlags = []cli.Flag{
		cli.StringFlag{Name: "config-path"},
		cli.BoolFlag{Name: "debug, d"},
	}
	cfgFlags = append(cfgFlags, options.Network...)

	var cfgWithCountFlags = make([]cli.Flag, len(cfgFlags))
	copy(cfgWithCountFlags, cfgFlags)
	cfgWithCountFlags = append(cfgWithCountFlags,
		cli.UintFlag{
			Name:  "count, c",
			Usage: "number of blocks to be processed (default or 0: all chain)",
		},
	)

	var cfgCountOutFlags = make([]cli.Flag, len(cfgWithCountFlags))
	copy(cfgCountOutFlags, cfgWithCountFlags)
	cfgCountOutFlags = append(cfgCountOutFlags,
		cli.UintFlag{
			Name:  "start, s",
			Usage: "block number to start from (default: 0)",
		},
		cli.StringFlag{
			Name:  "out, o",
			Usage: "Output file (stdout if not given)",
		},
	)

	var cfgCountInFlags = make([]cli.Flag, len(cfgWithCountFlags))
	copy(cfgCountInFlags, cfgWithCountFlags)
	cfgCountInFlags = append(cfgCountInFlags,
		cli.UintFlag{
			Name:  "skip, s",
			Usage: "number of blocks to skip (default: 0)",
		},
		cli.StringFlag{
			Name:  "in, i",
			Usage: "Input file (stdin if not given)",
		},
		cli.StringFlag{
			Name:  "dump",
			Usage: "directory for storing JSON dumps",
		},
	)

	return []cli.Command{
		{
			Name:   "node",
			Usage:  "start a NEO node",
			Action: startServer,
			Flags:  cfgFlags,
		},
		{
			Name:  "db",
			Usage: "database manipulations",
			Subcommands: []cli.Command{
				{
					Name:   "dump",
					Usage:  "dump blocks (starting with block #1) to the file",
					Action: dumpDB,
					Flags:  cfgCountOutFlags,
				},
				{
					Name:   "restore",
					Usage:  "restore blocks from the file",
					Action: restoreDB,
					Flags:  cfgCountInFlags,
				},
			},
		},
	}
}
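
// newGraceContext returns a context that is cancelled once the process
// receives an interrupt signal (SIGINT).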
func newGraceContext() context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	go func() {
		<-stop
		cancel()
	}()
	return ctx
}

// getConfigFromContext looks at the path and network flags in the given cli
// context and returns the appropriate config.
func getConfigFromContext(ctx *cli.Context) (config.Config, error) {
	configPath := "./config"
	if argCp := ctx.String("config-path"); argCp != "" {
		configPath = argCp
	}
	return config.Load(configPath, options.GetNetwork(ctx))
}

// handleLoggingParams reads logging parameters.
// If the user selected the debug level, it is enabled.
// If LogPath is configured, the log directory and file are created.
func handleLoggingParams(ctx *cli.Context, cfg config.ApplicationConfiguration) (*zap.Logger, error) {
	level := zapcore.InfoLevel
	if ctx.Bool("debug") {
		level = zapcore.DebugLevel
	}
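
	// zap's production preset is used as a base, tuned for human-readable
	// console output: sampling disabled, caller/stacktrace info dropped,
	// ISO8601 timestamps and capitalized level names.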
	cc := zap.NewProductionConfig()
	cc.DisableCaller = true
	cc.DisableStacktrace = true
	cc.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
	cc.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	cc.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cc.Encoding = "console"
	cc.Level = zap.NewAtomicLevelAt(level)
	cc.Sampling = nil

	if logPath := cfg.LogPath; logPath != "" {
		if err := io.MakeDirForFile(logPath, "logger"); err != nil {
			return nil, err
		}
		cc.OutputPaths = []string{logPath}
	}

	return cc.Build()
}
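
// initBCWithMetrics creates the Blockchain along with Prometheus and pprof
// services and starts all three of them.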
func initBCWithMetrics(cfg config.Config, log *zap.Logger) (*core.Blockchain, *metrics.Service, *metrics.Service, error) {
	chain, err := initBlockChain(cfg, log)
	if err != nil {
		return nil, nil, nil, cli.NewExitError(err, 1)
	}
	configureAddresses(&cfg.ApplicationConfiguration)
	prometheus := metrics.NewPrometheusService(cfg.ApplicationConfiguration.Prometheus, log)
	pprof := metrics.NewPprofService(cfg.ApplicationConfiguration.Pprof, log)

	go chain.Run()
	go prometheus.Start()
	go pprof.Start()

	return chain, prometheus, pprof, nil
}
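
// dumpDB is the handler of the 'db dump' command, it writes blocks from the
// configured DB to a file (or stdout) as length-prefixed binary records.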
func dumpDB(ctx *cli.Context) error {
	cfg, err := getConfigFromContext(ctx)
	if err != nil {
		return cli.NewExitError(err, 1)
	}
	log, err := handleLoggingParams(ctx, cfg.ApplicationConfiguration)
	if err != nil {
		return cli.NewExitError(err, 1)
	}
	count := uint32(ctx.Uint("count"))
	start := uint32(ctx.Uint("start"))

	var outStream = os.Stdout
	if out := ctx.String("out"); out != "" {
		outStream, err = os.Create(out)
		if err != nil {
			return cli.NewExitError(err, 1)
		}
	}
	defer outStream.Close()
	writer := io.NewBinWriterFromIO(outStream)

	chain, prometheus, pprof, err := initBCWithMetrics(cfg, log)
	if err != nil {
		return err
	}

	chainCount := chain.BlockHeight() + 1
	if start+count > chainCount {
		return cli.NewExitError(fmt.Errorf("chain is not that high (%d) to dump %d blocks starting from %d", chainCount-1, count, start), 1)
	}
	if count == 0 {
		count = chainCount - start
	}
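
	// The dump format is a uint32 LE block count followed by, for every block,
	// a uint32 LE length prefix and the block's binary serialization.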
	writer.WriteU32LE(count)
	for i := start; i < start+count; i++ {
		bh := chain.GetHeaderHash(int(i))
		b, err := chain.GetBlock(bh)
		if err != nil {
			return cli.NewExitError(fmt.Errorf("failed to get block %d: %w", i, err), 1)
		}
		buf := io.NewBufBinWriter()
		b.EncodeBinary(buf.BinWriter)
		bytes := buf.Bytes()
		writer.WriteU32LE(uint32(len(bytes)))
		writer.WriteBytes(bytes)
		if writer.Err != nil {
			return cli.NewExitError(writer.Err, 1)
		}
	}
	pprof.ShutDown()
	prometheus.ShutDown()
	chain.Close()
	return nil
}
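
// restoreDB is the handler of the 'db restore' command, it reads blocks in the
// dumpDB format from a file (or stdin) and adds them to the chain.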
func restoreDB(ctx *cli.Context) error {
	cfg, err := getConfigFromContext(ctx)
	if err != nil {
		return err
	}
	log, err := handleLoggingParams(ctx, cfg.ApplicationConfiguration)
	if err != nil {
		return cli.NewExitError(err, 1)
	}
	count := uint32(ctx.Uint("count"))
	skip := uint32(ctx.Uint("skip"))

	var inStream = os.Stdin
	if in := ctx.String("in"); in != "" {
		inStream, err = os.Open(in)
		if err != nil {
			return cli.NewExitError(err, 1)
		}
	}
	defer inStream.Close()
	reader := io.NewBinReaderFromIO(inStream)

	dumpDir := ctx.String("dump")
	if dumpDir != "" {
		cfg.ProtocolConfiguration.SaveStorageBatch = true
	}

	chain, prometheus, pprof, err := initBCWithMetrics(cfg, log)
	if err != nil {
		return err
	}
	defer chain.Close()
	defer prometheus.ShutDown()
	defer pprof.ShutDown()

	var allBlocks = reader.ReadU32LE()
	if reader.Err != nil {
		return cli.NewExitError(reader.Err, 1)
	}
	if skip+count > allBlocks {
		return cli.NewExitError(fmt.Errorf("input file has only %d blocks, can't read %d starting from %d", allBlocks, count, skip), 1)
	}
	if count == 0 {
		count = allBlocks - skip
	}
	i := uint32(0)
	for ; i < skip; i++ {
		_, err := readBlock(reader)
		if err != nil {
			return cli.NewExitError(err, 1)
		}
	}

	gctx := newGraceContext()
	var lastIndex uint32
	dump := newDump()
	defer func() {
		_ = dump.tryPersist(dumpDir, lastIndex)
	}()
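
	// Import blocks one by one; when a dump directory is given, per-block
	// storage changes are collected and flushed to it every 1000 blocks (and
	// once more on exit via the deferred tryPersist above).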
	for ; i < skip+count; i++ {
		select {
		case <-gctx.Done():
			return cli.NewExitError("cancelled", 1)
		default:
		}
		bytes, err := readBlock(reader)
		if err != nil {
			return cli.NewExitError(err, 1)
		}
		block := block.New(cfg.ProtocolConfiguration.Magic)
		newReader := io.NewBinReaderFromBuf(bytes)
		block.DecodeBinary(newReader)
		if newReader.Err != nil {
			return cli.NewExitError(newReader.Err, 1)
		}
		if block.Index == 0 && i == 0 && skip == 0 {
			genesis, err := chain.GetBlock(block.Hash())
			if err == nil && genesis.Index == 0 {
				log.Info("skipped genesis block", zap.String("hash", block.Hash().StringLE()))
			}
		} else {
			err = chain.AddBlock(block)
			if err != nil {
				return cli.NewExitError(fmt.Errorf("failed to add block %d: %w", i, err), 1)
			}
		}
		if dumpDir != "" {
			batch := chain.LastBatch()
			// The genesis block may already be persisted, so LastBatch() will return nil.
			if batch == nil && block.Index == 0 {
				continue
			}
			dump.add(block.Index, batch)
			lastIndex = block.Index
			if block.Index%1000 == 0 {
				if err := dump.tryPersist(dumpDir, block.Index); err != nil {
					return cli.NewExitError(fmt.Errorf("can't dump storage to file: %w", err), 1)
				}
			}
		}
	}
	return nil
}

// readBlock reads a block size prefix and then exactly that many bytes of
// serialized block data.
func readBlock(reader *io.BinReader) ([]byte, error) {
	var size = reader.ReadU32LE()
	bytes := make([]byte, size)
	reader.ReadBytes(bytes)
	if reader.Err != nil {
		return nil, reader.Err
	}
	return bytes, nil
}
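
// startServer is the handler of the 'node' command, it starts the blockchain,
// metrics, P2P and RPC services and runs until an error or an interrupt.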
func startServer(ctx *cli.Context) error {
	cfg, err := getConfigFromContext(ctx)
	if err != nil {
		return err
	}
	log, err := handleLoggingParams(ctx, cfg.ApplicationConfiguration)
	if err != nil {
		return err
	}

	grace, cancel := context.WithCancel(newGraceContext())
	defer cancel()

	serverConfig := network.NewServerConfig(cfg)

	chain, prometheus, pprof, err := initBCWithMetrics(cfg, log)
	if err != nil {
		return err
	}

	serv, err := network.NewServer(serverConfig, chain, log)
	if err != nil {
		return cli.NewExitError(fmt.Errorf("failed to create network server: %w", err), 1)
	}
	rpcServer := server.New(chain, cfg.ApplicationConfiguration.RPC, serv, log)
	errChan := make(chan error)

	go serv.Start(errChan)
	go rpcServer.Start(errChan)

	fmt.Println(logo())
	fmt.Println(serv.UserAgent)
	fmt.Println()

	var shutdownErr error
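
// Event loop: a service error cancels the grace context; once that context is
// done, all services are shut down and the loop exits.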
Main:
	for {
		select {
		case err := <-errChan:
			shutdownErr = fmt.Errorf("server error: %w", err)
			cancel()

		case <-grace.Done():
			serv.Shutdown()
			if serverErr := rpcServer.Shutdown(); serverErr != nil {
				shutdownErr = fmt.Errorf("error on shutdown: %w", serverErr)
			}
			prometheus.ShutDown()
			pprof.ShutDown()
			chain.Close()
			break Main
		}
	}

	if shutdownErr != nil {
		return cli.NewExitError(shutdownErr, 1)
	}

	return nil
}

// configureAddresses sets up the addresses for the RPC, Prometheus and Pprof
// services based on the provided config. A service with its own Address keeps it;
// if only the global (node) Address is set, services without an explicit address
// fall back to it, so the node, RPC, Prometheus and Pprof all share one address.
func configureAddresses(cfg *config.ApplicationConfiguration) {
	if cfg.Address != "" {
		if cfg.RPC.Address == "" {
			cfg.RPC.Address = cfg.Address
		}
		if cfg.Prometheus.Address == "" {
			cfg.Prometheus.Address = cfg.Address
		}
		if cfg.Pprof.Address == "" {
			cfg.Pprof.Address = cfg.Address
		}
	}
}

// initBlockChain initializes the Blockchain with the DB backend selected in the config.
func initBlockChain(cfg config.Config, log *zap.Logger) (*core.Blockchain, error) {
	store, err := storage.NewStore(cfg.ApplicationConfiguration.DBConfiguration)
	if err != nil {
		return nil, cli.NewExitError(fmt.Errorf("could not initialize storage: %w", err), 1)
	}

	chain, err := core.NewBlockchain(store, cfg.ProtocolConfiguration, log)
	if err != nil {
		return nil, cli.NewExitError(fmt.Errorf("could not initialize blockchain: %w", err), 1)
	}
	return chain, nil
}
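
// logo returns the ASCII-art NEO-GO banner printed on node startup.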
func logo() string {
	return `
    _   ____________        __________
   / | / / ____/ __ \      / ____/ __ \
  /  |/ / __/ / / / /_____/ / __/ / / /
 / /|  / /___/ /_/ /_____/ /_/ / /_/ /
/_/ |_/_____/\____/      \____/\____/
`
}