diff --git a/cli/server/server.go b/cli/server/server.go
index cd01a074e..32b8322aa 100644
--- a/cli/server/server.go
+++ b/cli/server/server.go
@@ -9,6 +9,7 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/config"
 	"github.com/nspcc-dev/neo-go/pkg/core"
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
+	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/storage"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
 	"github.com/nspcc-dev/neo-go/pkg/io"
@@ -49,6 +50,10 @@ func NewCommands() []cli.Command {
 			Name:  "out, o",
 			Usage: "Output file (stdout if not given)",
 		},
+		cli.StringFlag{
+			Name:  "state, r",
+			Usage: "File to export state roots to",
+		},
 	)
 	var cfgCountInFlags = make([]cli.Flag, len(cfgWithCountFlags))
 	copy(cfgCountInFlags, cfgWithCountFlags)
@@ -65,6 +70,10 @@ func NewCommands() []cli.Command {
 			Name:  "diff, k",
 			Usage: "Use if DB is restore from diff and not full dump",
 		},
+		cli.StringFlag{
+			Name:  "state, r",
+			Usage: "File to import state roots from",
+		},
 	)
 	return []cli.Command{
 		{
@@ -204,22 +213,47 @@ func dumpDB(ctx *cli.Context) error {
 	if count == 0 {
 		count = chainCount - start
 	}
+
+	var rootsWriter *io.BinWriter
+	if out := ctx.String("state"); out != "" {
+		rootsStream, err := os.Create(out)
+		if err != nil {
+			return cli.NewExitError(fmt.Errorf("can't create file for state roots: %w", err), 1)
+		}
+		defer rootsStream.Close()
+		rootsWriter = io.NewBinWriterFromIO(rootsStream)
+	}
 	if start != 0 {
 		writer.WriteU32LE(start)
 	}
 	writer.WriteU32LE(count)
+
+	rootsCount := count
+	if rootsWriter != nil {
+		rootsWriter.WriteU32LE(start)
+		if h := chain.StateHeight() + 1; start+rootsCount > h {
+			log.Info("state height is low, state root dump will be cut", zap.Uint32("height", h))
+			rootsCount = h - start
+		}
+		rootsWriter.WriteU32LE(rootsCount)
+	}
+
 	for i := start; i < start+count; i++ {
+		if rootsWriter != nil && i < start+rootsCount {
+			r, err := chain.GetStateRoot(i)
+			if err != nil {
+				return cli.NewExitError(fmt.Errorf("failed to get stateroot %d: %w", i, err), 1)
+			}
+			if err := writeSizedItem(&r.MPTRoot, rootsWriter); err != nil {
+				return cli.NewExitError(err, 1)
+			}
+		}
 		bh := chain.GetHeaderHash(int(i))
 		b, err := chain.GetBlock(bh)
 		if err != nil {
 			return cli.NewExitError(fmt.Errorf("failed to get block %d: %s", i, err), 1)
 		}
-		buf := io.NewBufBinWriter()
-		b.EncodeBinary(buf.BinWriter)
-		bytes := buf.Bytes()
-		writer.WriteU32LE(uint32(len(bytes)))
-		writer.WriteBytes(bytes)
-		if writer.Err != nil {
+		if err := writeSizedItem(b, writer); err != nil {
 			return cli.NewExitError(err, 1)
 		}
 	}
@@ -229,6 +263,15 @@ func dumpDB(ctx *cli.Context) error {
 	return nil
 }
 
+func writeSizedItem(item io.Serializable, w *io.BinWriter) error {
+	buf := io.NewBufBinWriter()
+	item.EncodeBinary(buf.BinWriter)
+	bytes := buf.Bytes()
+	w.WriteU32LE(uint32(len(bytes)))
+	w.WriteBytes(bytes)
+	return w.Err
+}
+
 func restoreDB(ctx *cli.Context) error {
 	cfg, err := getConfigFromContext(ctx)
 	if err != nil {
@@ -251,6 +294,16 @@ func restoreDB(ctx *cli.Context) error {
 	defer inStream.Close()
 	reader := io.NewBinReaderFromIO(inStream)
 
+	var rootsIn *io.BinReader
+	if in := ctx.String("state"); in != "" {
+		inStream, err := os.Open(in)
+		if err != nil {
+			return cli.NewExitError(err, 1)
+		}
+		defer inStream.Close()
+		rootsIn = io.NewBinReaderFromIO(inStream)
+	}
+
 	dumpDir := ctx.String("dump")
 	if dumpDir != "" {
 		cfg.ProtocolConfiguration.SaveStorageBatch = true
@@ -272,7 +325,7 @@ func restoreDB(ctx *cli.Context) error {
 		dumpSize = reader.ReadU32LE()
 	}
 	if reader.Err != nil {
-		return cli.NewExitError(err, 1)
+		return cli.NewExitError(reader.Err, 1)
 	}
 	if start < dumpStart {
 		return cli.NewExitError(fmt.Errorf("input file start from %d block, can't import %d", dumpStart, start), 1)
@@ -285,13 +338,40 @@ func restoreDB(ctx *cli.Context) error {
 	if count == 0 {
 		count = lastBlock - start
 	}
+
+	var rootStart, rootSize uint32
+	rootCount := count
+	if rootsIn != nil {
+		rootStart = rootsIn.ReadU32LE()
+		rootSize = rootsIn.ReadU32LE()
+		if rootsIn.Err != nil {
+			return cli.NewExitError(fmt.Errorf("error while reading roots file: %w", rootsIn.Err), 1)
+		}
+		if start < rootStart {
+			return cli.NewExitError(fmt.Errorf("roots file start from %d root, can't import %d", rootStart, start), 1)
+		}
+		lastRoot := rootStart + rootSize
+		if rootStart+rootCount > lastRoot {
+			log.Info("state root height is low", zap.Uint32("height", lastRoot))
+			rootCount = lastRoot - rootStart
+		}
+	}
+
 	i := dumpStart
 	for ; i < start; i++ {
-		_, err := readBlock(reader)
+		_, err := readBytes(reader)
 		if err != nil {
 			return cli.NewExitError(err, 1)
 		}
 	}
+	if rootsIn != nil {
+		for j := rootStart; j < start; j++ {
+			_, err := readBytes(rootsIn)
+			if err != nil {
+				return cli.NewExitError(err, 1)
+			}
+		}
+	}
 
 	gctx := newGraceContext()
 	var lastIndex uint32
@@ -306,44 +386,60 @@ func restoreDB(ctx *cli.Context) error {
 			return cli.NewExitError("cancelled", 1)
 		default:
 		}
-		bytes, err := readBlock(reader)
-		if err != nil {
+		block := new(block.Block)
+		if err := readSizedItem(block, reader); err != nil {
 			return cli.NewExitError(err, 1)
 		}
-		block := &block.Block{}
-		newReader := io.NewBinReaderFromBuf(bytes)
-		block.DecodeBinary(newReader)
-		if newReader.Err != nil {
-			return cli.NewExitError(newReader.Err, 1)
-		}
+		var skipBlock bool
 		if block.Index == 0 && i == 0 && start == 0 {
 			genesis, err := chain.GetBlock(block.Hash())
 			if err == nil && genesis.Index == 0 {
 				log.Info("skipped genesis block", zap.String("hash", block.Hash().StringLE()))
-				continue
+				skipBlock = true
 			}
 		}
-		err = chain.AddBlock(block)
-		if err != nil {
-			return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
-		}
-
-		if dumpDir != "" {
-			batch := chain.LastBatch()
-			dump.add(block.Index, batch)
-			lastIndex = block.Index
-			if block.Index%1000 == 0 {
-				if err := dump.tryPersist(dumpDir, block.Index); err != nil {
-					return cli.NewExitError(fmt.Errorf("can't dump storage to file: %v", err), 1)
+		if !skipBlock {
+			err = chain.AddBlock(block)
+			if err != nil {
+				return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
+			}
+			if dumpDir != "" {
+				batch := chain.LastBatch()
+				dump.add(block.Index, batch)
+				lastIndex = block.Index
+				if block.Index%1000 == 0 {
+					if err := dump.tryPersist(dumpDir, block.Index); err != nil {
+						return cli.NewExitError(fmt.Errorf("can't dump storage to file: %v", err), 1)
+					}
 				}
 			}
 		}
+		if rootsIn != nil && i < rootStart+rootCount {
+			sr := new(state.MPTRoot)
+			if err := readSizedItem(sr, rootsIn); err != nil {
+				return cli.NewExitError(err, 1)
+			}
+			err = chain.AddStateRoot(sr)
+			if err != nil {
+				return cli.NewExitError(fmt.Errorf("can't add state root: %w", err), 1)
+			}
+		}
 	}
 	return nil
 }
 
-// readBlock performs reading of block size and then bytes with the length equal to that size.
-func readBlock(reader *io.BinReader) ([]byte, error) {
+func readSizedItem(item io.Serializable, r *io.BinReader) error {
+	bytes, err := readBytes(r)
+	if err != nil {
+		return err
+	}
+	newReader := io.NewBinReaderFromBuf(bytes)
+	item.DecodeBinary(newReader)
+	return newReader.Err
+}
+
+// readBytes performs reading of block size and then bytes with the length equal to that size.
+func readBytes(reader *io.BinReader) ([]byte, error) {
 	var size = reader.ReadU32LE()
 	bytes := make([]byte, size)
 	reader.ReadBytes(bytes)