cli: dump storage changes into JSON files
This commit is contained in:
parent
fb9af98179
commit
b1d9e1132d
4 changed files with 191 additions and 0 deletions
158
cli/server/dump.go
Normal file
158
cli/server/dump.go
Normal file
|
@ -0,0 +1,158 @@
|
||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/CityOfZion/neo-go/pkg/core/storage"
|
||||||
|
"github.com/CityOfZion/neo-go/pkg/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dump is an ordered collection of per-block storage-change maps
// (as produced by batchToMap), accumulated until flushed to a JSON file.
type dump []interface{}
|
||||||
|
|
||||||
|
// storageOp is a single storage operation as represented in the dump's
// JSON output (this dump emits "Added" and "Deleted" states).
type storageOp struct {
	// State is the kind of operation performed on the key.
	State string `json:"state"`
	// Key is the hex-encoded storage key converted to NEO dump format.
	Key string `json:"key"`
	// Value is the hex-encoded stored value; omitted for deletions.
	Value string `json:"value,omitempty"`
}
|
||||||
|
|
||||||
|
// NEO has some differences of key storing.
|
||||||
|
// out format: script hash in LE + key
|
||||||
|
// neo format: script hash in BE + byte(0) + key with 0 between every 16 bytes, padded to len 16.
|
||||||
|
func toNeoStorageKey(key []byte) []byte {
|
||||||
|
if len(key) < util.Uint160Size {
|
||||||
|
panic("invalid key in storage")
|
||||||
|
}
|
||||||
|
|
||||||
|
var nkey []byte
|
||||||
|
for i := util.Uint160Size - 1; i >= 0; i-- {
|
||||||
|
nkey = append(nkey, key[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
key = key[util.Uint160Size:]
|
||||||
|
|
||||||
|
index := 0
|
||||||
|
remain := len(key)
|
||||||
|
for remain >= 16 {
|
||||||
|
nkey = append(nkey, key[index:index+16]...)
|
||||||
|
nkey = append(nkey, 0)
|
||||||
|
index += 16
|
||||||
|
remain -= 16
|
||||||
|
}
|
||||||
|
|
||||||
|
if remain > 0 {
|
||||||
|
nkey = append(nkey, key[index:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
padding := 16 - remain
|
||||||
|
for i := 0; i < padding; i++ {
|
||||||
|
nkey = append(nkey, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
nkey = append(nkey, byte(padding))
|
||||||
|
|
||||||
|
return nkey
|
||||||
|
}
|
||||||
|
|
||||||
|
// batchToMap converts batch to a map so that JSON is compatible
|
||||||
|
// with https://github.com/NeoResearch/neo-storage-audit/
|
||||||
|
func batchToMap(index uint32, batch *storage.MemBatch) map[string]interface{} {
|
||||||
|
size := len(batch.Put) + len(batch.Deleted)
|
||||||
|
ops := make([]storageOp, 0, size)
|
||||||
|
for i := range batch.Put {
|
||||||
|
key := batch.Put[i].Key
|
||||||
|
if len(key) == 0 || key[0] != byte(storage.STStorage) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
key = toNeoStorageKey(key[1:])
|
||||||
|
ops = append(ops, storageOp{
|
||||||
|
State: "Added",
|
||||||
|
Key: hex.EncodeToString(key),
|
||||||
|
Value: "00" + hex.EncodeToString(batch.Put[i].Value),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range batch.Deleted {
|
||||||
|
key := batch.Deleted[i].Key
|
||||||
|
if len(key) == 0 || key[0] != byte(storage.STStorage) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
key = toNeoStorageKey(key[1:])
|
||||||
|
ops = append(ops, storageOp{
|
||||||
|
State: "Deleted",
|
||||||
|
Key: hex.EncodeToString(key),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return map[string]interface{}{
|
||||||
|
"block": index,
|
||||||
|
"size": len(ops),
|
||||||
|
"storage": ops,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDump() *dump {
|
||||||
|
return new(dump)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dump) add(index uint32, batch *storage.MemBatch) {
|
||||||
|
m := batchToMap(index, batch)
|
||||||
|
*d = append(*d, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dump) tryPersist(prefix string, index uint32) error {
|
||||||
|
if index%1000 != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := createFile(prefix, index)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
enc := json.NewEncoder(f)
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
if err := enc.Encode(*d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*d = (*d)[:0]
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createFile creates directory and file in it for storing blocks up to index.
// Directory structure is the following:
// https://github.com/NeoResearch/neo-storage-audit#folder-organization-where-to-find-the-desired-block
// Dir `BlockStorage_$DIRNO` contains blocks up to $DIRNO (from $DIRNO-100k)
// Inside it there are files grouped by 1k blocks.
// File dump-block-$FILENO.json contains blocks from $FILENO-999, $FILENO
// Example: file `BlockStorage_100000/dump-block-6000.json` contains blocks from 5001 to 6000.
func createFile(prefix string, index uint32) (*os.File, error) {
	dirN := (index-1)/100000 + 1
	dir := fmt.Sprintf("BlockStorage_%d00000", dirN)

	path := filepath.Join(prefix, dir)
	info, err := os.Stat(path)
	switch {
	case os.IsNotExist(err):
		if err := os.MkdirAll(path, os.ModePerm); err != nil {
			return nil, err
		}
	case err != nil:
		// Stat failed for a reason other than absence (e.g. permissions).
		// The old code fell through here and dereferenced a nil FileInfo.
		return nil, err
	case !info.IsDir():
		return nil, fmt.Errorf("file `%s` is not a directory", path)
	}

	fileN := (index-1)/1000 + 1
	file := fmt.Sprintf("dump-block-%d000.json", fileN)
	path = filepath.Join(path, file)

	return os.Create(path)
}
|
|
@ -61,6 +61,10 @@ func NewCommands() []cli.Command {
|
||||||
Name: "in, i",
|
Name: "in, i",
|
||||||
Usage: "Input file (stdin if not given)",
|
Usage: "Input file (stdin if not given)",
|
||||||
},
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "dump",
|
||||||
|
Usage: "directory for storing JSON dumps",
|
||||||
|
},
|
||||||
)
|
)
|
||||||
return []cli.Command{
|
return []cli.Command{
|
||||||
{
|
{
|
||||||
|
@ -242,6 +246,11 @@ func restoreDB(ctx *cli.Context) error {
|
||||||
defer inStream.Close()
|
defer inStream.Close()
|
||||||
reader := io.NewBinReaderFromIO(inStream)
|
reader := io.NewBinReaderFromIO(inStream)
|
||||||
|
|
||||||
|
dumpDir := ctx.String("dump")
|
||||||
|
if dumpDir != "" {
|
||||||
|
cfg.ProtocolConfiguration.SaveStorageBatch = true
|
||||||
|
}
|
||||||
|
|
||||||
chain, prometheus, pprof, err := initBCWithMetrics(cfg, log)
|
chain, prometheus, pprof, err := initBCWithMetrics(cfg, log)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -267,6 +276,9 @@ func restoreDB(ctx *cli.Context) error {
|
||||||
return cli.NewExitError(err, 1)
|
return cli.NewExitError(err, 1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dump := newDump()
|
||||||
|
|
||||||
for ; i < skip+count; i++ {
|
for ; i < skip+count; i++ {
|
||||||
bytes, err := readBlock(reader)
|
bytes, err := readBlock(reader)
|
||||||
block := &block.Block{}
|
block := &block.Block{}
|
||||||
|
@ -286,6 +298,14 @@ func restoreDB(ctx *cli.Context) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
|
return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if dumpDir != "" {
|
||||||
|
batch := chain.LastBatch()
|
||||||
|
dump.add(block.Index, batch)
|
||||||
|
if err := dump.tryPersist(dumpDir, block.Index); err != nil {
|
||||||
|
return cli.NewExitError(fmt.Errorf("can't dump storage to file: %v", err), 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,6 +57,8 @@ type (
|
||||||
VerifyTransactions bool `yaml:"VerifyTransactions"`
|
VerifyTransactions bool `yaml:"VerifyTransactions"`
|
||||||
// FreeGasLimit is an amount of GAS which can be spent for free.
|
// FreeGasLimit is an amount of GAS which can be spent for free.
|
||||||
FreeGasLimit util.Fixed8 `yaml:"FreeGasLimit"`
|
FreeGasLimit util.Fixed8 `yaml:"FreeGasLimit"`
|
||||||
|
// SaveStorageBatch enables storage batch saving before every persist.
|
||||||
|
SaveStorageBatch bool `yaml:"SaveStorageBatch"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SystemFee fees related to system.
|
// SystemFee fees related to system.
|
||||||
|
|
|
@ -109,6 +109,8 @@ type Blockchain struct {
|
||||||
keyCache map[util.Uint160]map[string]*keys.PublicKey
|
keyCache map[util.Uint160]map[string]*keys.PublicKey
|
||||||
|
|
||||||
log *zap.Logger
|
log *zap.Logger
|
||||||
|
|
||||||
|
lastBatch *storage.MemBatch
|
||||||
}
|
}
|
||||||
|
|
||||||
type headersOpFunc func(headerList *HeaderHashList)
|
type headersOpFunc func(headerList *HeaderHashList)
|
||||||
|
@ -604,6 +606,10 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
||||||
bc.lock.Lock()
|
bc.lock.Lock()
|
||||||
defer bc.lock.Unlock()
|
defer bc.lock.Unlock()
|
||||||
|
|
||||||
|
if bc.config.SaveStorageBatch {
|
||||||
|
bc.lastBatch = cache.dao.store.GetBatch()
|
||||||
|
}
|
||||||
|
|
||||||
_, err := cache.Persist()
|
_, err := cache.Persist()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -615,6 +621,11 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastBatch returns last persisted storage batch.
|
||||||
|
func (bc *Blockchain) LastBatch() *storage.MemBatch {
|
||||||
|
return bc.lastBatch
|
||||||
|
}
|
||||||
|
|
||||||
// processOutputs processes transaction outputs.
|
// processOutputs processes transaction outputs.
|
||||||
func processOutputs(tx *transaction.Transaction, dao *cachedDao) error {
|
func processOutputs(tx *transaction.Transaction, dao *cachedDao) error {
|
||||||
for index, output := range tx.Outputs {
|
for index, output := range tx.Outputs {
|
||||||
|
|
Loading…
Reference in a new issue