Mirror of https://github.com/nspcc-dev/neo-go.git, synced 2024-12-22 19:19:09 +00:00
Merge branch 'dev-prepared-for-master' into master
See #283 for details. This just makes the `dev` code available in the `master` branch without any attempt to make it really do something.
Commit beec8f114a
189 changed files with 16376 additions and 0 deletions
84
.circleci/config.yml
Normal file
|
@ -0,0 +1,84 @@
|
|||
version: 2.1
|
||||
executors:
|
||||
go1_11:
|
||||
docker:
|
||||
- image: circleci/golang:1.11
|
||||
environment:
|
||||
GO111MODULE: "on"
|
||||
go1_12:
|
||||
docker:
|
||||
- image: circleci/golang:1.12
|
||||
environment:
|
||||
GO111MODULE: "on"
|
||||
|
||||
commands:
|
||||
gomod:
|
||||
steps:
|
||||
- restore_cache:
|
||||
keys: [deps-]
|
||||
- run:
|
||||
name: Download go module dependencies
|
||||
command: go mod download
|
||||
- save_cache:
|
||||
key: deps-{{ checksum "go.sum" }}-{{ checksum "go.sum" }}
|
||||
paths: [/go/pkg/mod]
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
working_directory: /go/src/github.com/CityOfZion/neo-go
|
||||
executor: go1_12
|
||||
steps:
|
||||
- checkout
|
||||
- gomod
|
||||
- run:
|
||||
name: go-lint
|
||||
command: |
|
||||
go get -u -v golang.org/x/lint/golint
|
||||
golint -set_exit_status ./...
|
||||
|
||||
vet:
|
||||
working_directory: /go/src/github.com/CityOfZion/neo-go
|
||||
executor: go1_12
|
||||
steps:
|
||||
- checkout
|
||||
- gomod
|
||||
- run:
|
||||
name: go-vet
|
||||
command: go vet ./...
|
||||
|
||||
test_1_11:
|
||||
working_directory: /go/src/github.com/CityOfZion/neo-go
|
||||
executor: go1_11
|
||||
steps:
|
||||
- checkout
|
||||
- gomod
|
||||
- run: go test -v -race ./...
|
||||
|
||||
test_1_12:
|
||||
working_directory: /go/src/github.com/CityOfZion/neo-go
|
||||
executor: go1_12
|
||||
steps:
|
||||
- checkout
|
||||
- gomod
|
||||
- run: go test -v -race ./...
|
||||
|
||||
workflows:
|
||||
version: 2.1
|
||||
workflow:
|
||||
jobs:
|
||||
- vet:
|
||||
filters:
|
||||
tags:
|
||||
only: /[0-9]+\.[0-9]+\.[0-9]+/
|
||||
- lint:
|
||||
filters:
|
||||
tags:
|
||||
only: /[0-9]+\.[0-9]+\.[0-9]+/
|
||||
- test_1_11:
|
||||
filters:
|
||||
tags:
|
||||
only: /[0-9]+\.[0-9]+\.[0-9]+/
|
||||
- test_1_12:
|
||||
filters:
|
||||
tags:
|
||||
only: /[0-9]+\.[0-9]+\.[0-9]+/
|
3
.gitignore
vendored
|
@ -24,6 +24,9 @@ bin/
|
|||
!.vscode/extensions.json
|
||||
# goland
|
||||
.idea/*
|
||||
# emacs
|
||||
*~
|
||||
TAGS
|
||||
|
||||
# anthdm todolists
|
||||
/pkg/vm/compiler/todo.md
|
||||
|
|
17
.travis.yml
Normal file
|
@ -0,0 +1,17 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
install:
|
||||
- go get -v golang.org/x/lint/golint
|
||||
- go mod tidy -v
|
||||
script:
|
||||
- golint -set_exit_status ./...
|
||||
- go test -v -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
12
_pkg.dev/Readme.md
Normal file
@ -0,0 +1,12 @@
# ReadMe

Currently this package is in Development.


## References

btcd https://github.com/btcsuite/btcd

geth https://github.com/ethereum/go-ethereum

aeternity https://github.com/aeternity/elixir-node
137
_pkg.dev/chain/chain.go
Normal file
|
@ -0,0 +1,137 @@
|
|||
package chain
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/chaincfg"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrBlockAlreadyExists happens when you try to save the same block twice
|
||||
ErrBlockAlreadyExists = errors.New("this block has already been saved in the database")
|
||||
|
||||
// ErrFutureBlock happens when you try to save a block that is not the next block sequentially
|
||||
ErrFutureBlock = errors.New("this is not the next block sequentially, that should be added to the chain")
|
||||
)
|
||||
|
||||
// Chain represents a blockchain instance
|
||||
type Chain struct {
|
||||
Db *Chaindb
|
||||
height uint32
|
||||
}
|
||||
|
||||
// New returns a new chain instance
|
||||
func New(db database.Database, magic protocol.Magic) (*Chain, error) {
|
||||
|
||||
chain := &Chain{
|
||||
Db: &Chaindb{db},
|
||||
}
|
||||
|
||||
// Get last header saved to see if this is a fresh database
|
||||
_, err := chain.Db.GetLastHeader()
|
||||
if err == nil {
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
if err != database.ErrNotFound {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We have a database.ErrNotFound. Insert the genesisBlock
|
||||
fmt.Printf("Starting a fresh database for %s\n", magic.String())
|
||||
|
||||
params, err := chaincfg.NetParams(magic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = chain.Db.saveHeader(¶ms.GenesisBlock.BlockBase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = chain.Db.saveBlock(params.GenesisBlock, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// ProcessBlock verifies and saves the block in the database
|
||||
// XXX: for now we will just save without verifying the block
|
||||
// This function is called by the server and if an error is returned then
|
||||
// the server informs the sync manager to redownload the block
|
||||
// XXX:We should also check if the header is already saved in the database
|
||||
// If not, then we need to validate the header with the rest of the chain
|
||||
// For now we re-save the header
|
||||
func (c *Chain) ProcessBlock(block payload.Block) error {
|
||||
|
||||
// Check if we already have this block saved
|
||||
// XXX: We can optimise by implementing a Has() method
|
||||
// caching the last block in memory
|
||||
lastBlock, err := c.Db.GetLastBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if lastBlock.Index > block.Index {
|
||||
return ErrBlockAlreadyExists
|
||||
}
|
||||
|
||||
if block.Index > lastBlock.Index+1 {
|
||||
return ErrFutureBlock
|
||||
}
|
||||
|
||||
err = c.verifyBlock(block)
|
||||
if err != nil {
|
||||
return ValidationError{err.Error()}
|
||||
}
|
||||
err = c.Db.saveBlock(block, false)
|
||||
if err != nil {
|
||||
return DatabaseError{err.Error()}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyBlock verifies whether a block is valid according
|
||||
// to the rules of consensus
|
||||
func (c *Chain) verifyBlock(block payload.Block) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyTx verifies whether a transaction is valid according
|
||||
// to the rules of consensus
|
||||
func (c *Chain) VerifyTx(tx transaction.Transactioner) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessHeaders will save the set of headers without validating
|
||||
func (c *Chain) ProcessHeaders(hdrs []*payload.BlockBase) error {
|
||||
|
||||
err := c.verifyHeaders(hdrs)
|
||||
if err != nil {
|
||||
return ValidationError{err.Error()}
|
||||
}
|
||||
err = c.Db.saveHeaders(hdrs)
|
||||
if err != nil {
|
||||
return DatabaseError{err.Error()}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyHeaders will be used to verify a batch of headers
|
||||
// should only ever be called during the initial block download
|
||||
// or when the node receives a HeadersMessage
|
||||
func (c *Chain) verifyHeaders(hdrs []*payload.BlockBase) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CurrentHeight returns the index of the block
|
||||
// at the tip of the chain
|
||||
func (c Chain) CurrentHeight() uint32 {
|
||||
return c.height
|
||||
}
|
372
_pkg.dev/chain/chaindb.go
Normal file
|
@ -0,0 +1,372 @@
|
|||
package chain
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
var (
|
||||
// TX is the prefix used when inserting a tx into the db
|
||||
TX = []byte("TX")
|
||||
// HEADER is the prefix used when inserting a header into the db
|
||||
HEADER = []byte("HE")
|
||||
// LATESTHEADER is the prefix used when inserting the latest header into the db
|
||||
LATESTHEADER = []byte("LH")
|
||||
// UTXO is the prefix used when inserting a utxo into the db
|
||||
UTXO = []byte("UT")
|
||||
// LATESTBLOCK is the prefix used when inserting the latest block into the db
|
||||
LATESTBLOCK = []byte("LB")
|
||||
// BLOCKHASHTX is the prefix used when linking a blockhash to a given tx
|
||||
BLOCKHASHTX = []byte("BT")
|
||||
// BLOCKHASHHEIGHT is the prefix used when linking a blockhash to its height
|
||||
// This is linked both ways
|
||||
BLOCKHASHHEIGHT = []byte("BH")
|
||||
// SCRIPTHASHUTXO is the prefix used when linking a utxo to a scripthash
|
||||
// This is linked both ways
|
||||
SCRIPTHASHUTXO = []byte("SU")
|
||||
)
|
||||
|
||||
// Chaindb is a wrapper around the db interface which adds an extra block chain specific layer on top.
|
||||
type Chaindb struct {
|
||||
db database.Database
|
||||
}
|
||||
|
||||
// This should not be exported for other callers.
|
||||
// It is safe-guarded by the chain's verification logic
|
||||
func (c *Chaindb) saveBlock(blk payload.Block, genesis bool) error {
|
||||
|
||||
latestBlockTable := database.NewTable(c.db, LATESTBLOCK)
|
||||
hashHeightTable := database.NewTable(c.db, BLOCKHASHHEIGHT)
|
||||
|
||||
// Save Txs and link to block hash
|
||||
err := c.saveTXs(blk.Txs, blk.Hash.Bytes(), genesis)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// LINK block height to hash - Both ways
|
||||
// This allows us to fetch a block using its hash or its height
|
||||
// Given the height, we will search the table to get the hash
|
||||
// We can then fetch all transactions in the tx table, which match that block hash
|
||||
height := uint32ToBytes(blk.Index)
|
||||
err = hashHeightTable.Put(height, blk.Hash.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = hashHeightTable.Put(blk.Hash.Bytes(), height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add block as latest block
|
||||
// This also acts as a Commit() for the block.
|
||||
// If an error occurred, then this will be set to the previous block
|
||||
// This is useful because if the node suddenly shut down while saving and the database was not corrupted
|
||||
// Then the node will see the latestBlock as the last saved block, and re-download the faulty block
|
||||
// Note: We check for the latest block on startup
|
||||
return latestBlockTable.Put([]byte(""), blk.Hash.Bytes())
|
||||
}
|
||||
|
||||
// Saves a tx and links each tx to the block it was found in
|
||||
// This should never be exported. The only way to add a tx is through its block
|
||||
func (c *Chaindb) saveTXs(txs []transaction.Transactioner, blockHash []byte, genesis bool) error {
|
||||
|
||||
for txIndex, tx := range txs {
|
||||
err := c.saveTx(tx, uint32(txIndex), blockHash, genesis)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Chaindb) saveTx(tx transaction.Transactioner, txIndex uint32, blockHash []byte, genesis bool) error {
|
||||
|
||||
txTable := database.NewTable(c.db, TX)
|
||||
blockTxTable := database.NewTable(c.db, BLOCKHASHTX)
|
||||
|
||||
// Save the whole tx using its hash as the key
|
||||
// In order to find a tx in this table, we need to know its hash
|
||||
txHash, err := tx.ID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = txTable.Put(txHash.Bytes(), tx.BaseTx().Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// LINK TXhash to block
|
||||
// This allows us to fetch a tx by just knowing what block it was in
|
||||
// This is useful for when we want to re-construct a block from its hash
|
||||
// In order to get the tx, we must do a prefix search on blockHash
|
||||
// This will return a set of txHashes.
|
||||
// We can then use these hashes to search the tx table for the txs we need
|
||||
key := bytesConcat(blockHash, uint32ToBytes(txIndex))
|
||||
err = blockTxTable.Put(key, txHash.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save all of the utxos in a transaction
|
||||
// We do this additional save so that we can form a utxo database
|
||||
// and know when a transaction is a double spend.
|
||||
utxos := tx.BaseTx().Outputs
|
||||
for utxoIndex, utxo := range utxos {
|
||||
err := c.saveUTXO(utxo, uint16(utxoIndex), txHash.Bytes(), blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Do not check for spent utxos on the genesis block
|
||||
if genesis {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove all spent utxos
|
||||
// We do this so that once an output has been spent
|
||||
// It will be removed from the utxo database and cannot be spent again
|
||||
// If the output was never in the utxo database, this function will return an error
|
||||
txos := tx.BaseTx().Inputs
|
||||
for _, txo := range txos {
|
||||
err := c.removeUTXO(txo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveUTXO will save a utxo and link it to its transaction and block
|
||||
func (c *Chaindb) saveUTXO(utxo *transaction.Output, utxoIndex uint16, txHash, blockHash []byte) error {
|
||||
|
||||
utxoTable := database.NewTable(c.db, UTXO)
|
||||
scripthashUTXOTable := database.NewTable(c.db, SCRIPTHASHUTXO)
|
||||
|
||||
// This is quite messy, we should (if possible) find a way to pass a Writer and Reader interface
|
||||
// Encode utxo into a buffer
|
||||
buf := new(bytes.Buffer)
|
||||
bw := &util.BinWriter{W: buf}
|
||||
if utxo.Encode(bw); bw.Err != nil {
|
||||
return bw.Err
|
||||
}
|
||||
|
||||
// Save UTXO
|
||||
// In order to find a utxo in the utxoTable
|
||||
// One must know the txHash that the utxo was in
|
||||
key := bytesConcat(txHash, uint16ToBytes(utxoIndex))
|
||||
if err := utxoTable.Put(key, buf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// LINK utxo to scripthash
|
||||
// This allows us to find a utxo with the scriptHash
|
||||
// Since the key starts with scriptHash, we can look for the scriptHash prefix
|
||||
// and find all utxos for a given scriptHash.
|
||||
// Additionally, we can search for all utxos for a certain user in a certain block with scriptHash+blockHash
|
||||
// But this may not be of use to us. However, note that we cannot have just the scriptHash with the utxoIndex
|
||||
// as this may not be unique. If Kim/Dautt agree, we can change blockHash to blockHeight, which allows us
|
||||
// To get all utxos above a certain blockHeight. Question is; Would this be useful?
|
||||
newKey := bytesConcat(utxo.ScriptHash.Bytes(), blockHash, uint16ToBytes(utxoIndex))
|
||||
if err := scripthashUTXOTable.Put(newKey, key); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := scripthashUTXOTable.Put(key, newKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeUTXO removes a spent utxo from the utxo table and unlinks it from the scripthash table
|
||||
func (c *Chaindb) removeUTXO(txo *transaction.Input) error {
|
||||
|
||||
utxoTable := database.NewTable(c.db, UTXO)
|
||||
scripthashUTXOTable := database.NewTable(c.db, SCRIPTHASHUTXO)
|
||||
|
||||
// Remove spent utxos from utxo database
|
||||
key := bytesConcat(txo.PrevHash.Bytes(), uint16ToBytes(txo.PrevIndex))
|
||||
err := utxoTable.Delete(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove utxos from scripthash table
|
||||
otherKey, err := scripthashUTXOTable.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := scripthashUTXOTable.Delete(otherKey); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := scripthashUTXOTable.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveHeaders will save a set of headers into the database
|
||||
func (c *Chaindb) saveHeaders(headers []*payload.BlockBase) error {
|
||||
|
||||
for _, hdr := range headers {
|
||||
err := c.saveHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveHeader saves a header into the database and updates the latest header
|
||||
// The headers are saved with their `blockheights` as Key
|
||||
// If we want to search for a header, we need to know its index
|
||||
// Alternatively, we can search the hashHeightTable with the block index to get the hash
|
||||
// If the block has been saved.
|
||||
// The reason why headers are saved with their index as Key, is so that we can
|
||||
// increment the key to find out what block we should fetch next during the initial
|
||||
// block download, when we are saving thousands of headers
|
||||
func (c *Chaindb) saveHeader(hdr *payload.BlockBase) error {
|
||||
|
||||
headerTable := database.NewTable(c.db, HEADER)
|
||||
latestHeaderTable := database.NewTable(c.db, LATESTHEADER)
|
||||
|
||||
index := uint32ToBytes(hdr.Index)
|
||||
|
||||
byt, err := hdr.Bytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = headerTable.Put(index, byt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update latest header
|
||||
return latestHeaderTable.Put([]byte(""), index)
|
||||
}
|
||||
|
||||
// GetHeaderFromHeight will get a header given its block height
|
||||
func (c *Chaindb) GetHeaderFromHeight(index []byte) (*payload.BlockBase, error) {
|
||||
headerTable := database.NewTable(c.db, HEADER)
|
||||
hdrBytes, err := headerTable.Get(index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reader := bytes.NewReader(hdrBytes)
|
||||
|
||||
blockBase := &payload.BlockBase{}
|
||||
err = blockBase.Decode(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blockBase, nil
|
||||
}
|
||||
|
||||
// GetLastHeader will get the header which was saved last in the database
|
||||
func (c *Chaindb) GetLastHeader() (*payload.BlockBase, error) {
|
||||
|
||||
latestHeaderTable := database.NewTable(c.db, LATESTHEADER)
|
||||
index, err := latestHeaderTable.Get([]byte(""))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.GetHeaderFromHeight(index)
|
||||
}
|
||||
|
||||
// GetBlockFromHash will return a block given its hash
|
||||
func (c *Chaindb) GetBlockFromHash(blockHash []byte) (*payload.Block, error) {
|
||||
|
||||
blockTxTable := database.NewTable(c.db, BLOCKHASHTX)
|
||||
|
||||
// To get a block we need to fetch:
|
||||
// The transactions (1)
|
||||
// The header (2)
|
||||
|
||||
// Reconstruct block by fetching its txs (1)
|
||||
var txs []transaction.Transactioner
|
||||
|
||||
// Get all Txhashes for this block
|
||||
txHashes, err := blockTxTable.Prefix(blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get all Tx's given their hash
|
||||
txTable := database.NewTable(c.db, TX)
|
||||
for _, txHash := range txHashes {
|
||||
|
||||
// Fetch tx by its hash
|
||||
txBytes, err := txTable.Get(txHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reader := bufio.NewReader(bytes.NewReader(txBytes))
|
||||
|
||||
tx, err := transaction.FromReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
|
||||
// Now fetch the header (2)
|
||||
// We have the block hash, but headers are stored with their `Height` as key.
|
||||
// We first search the `BlockHashHeight` table to get the height.
|
||||
//Then we search the headers table with the height
|
||||
hashHeightTable := database.NewTable(c.db, BLOCKHASHHEIGHT)
|
||||
height, err := hashHeightTable.Get(blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr, err := c.GetHeaderFromHeight(height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Construct block
|
||||
block := &payload.Block{
|
||||
BlockBase: *hdr,
|
||||
Txs: txs,
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// GetLastBlock will return the last block that has been saved
|
||||
func (c *Chaindb) GetLastBlock() (*payload.Block, error) {
|
||||
|
||||
latestBlockTable := database.NewTable(c.db, LATESTBLOCK)
|
||||
blockHash, err := latestBlockTable.Get([]byte(""))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.GetBlockFromHash(blockHash)
|
||||
}
|
||||
|
||||
func uint16ToBytes(x uint16) []byte {
|
||||
index := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(index, x)
|
||||
return index
|
||||
}
|
||||
|
||||
func uint32ToBytes(x uint32) []byte {
|
||||
index := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(index, x)
|
||||
return index
|
||||
}
|
||||
|
||||
func bytesConcat(args ...[]byte) []byte {
|
||||
var res []byte
|
||||
for _, arg := range args {
|
||||
res = append(res, arg...)
|
||||
}
|
||||
return res
|
||||
}
|
201
_pkg.dev/chain/chaindb_test.go
Normal file
|
@ -0,0 +1,201 @@
|
|||
package chain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
var s = rand.NewSource(time.Now().UnixNano())
|
||||
var r = rand.New(s)
|
||||
|
||||
func TestLastHeader(t *testing.T) {
|
||||
_, cdb, hdrs := saveRandomHeaders(t)
|
||||
|
||||
// Select last header from list of headers
|
||||
lastHeader := hdrs[len(hdrs)-1]
|
||||
// GetLastHeader from the database
|
||||
hdr, err := cdb.GetLastHeader()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, hdr.Index, lastHeader.Index)
|
||||
|
||||
// Clean up
|
||||
os.RemoveAll(database.DbDir)
|
||||
}
|
||||
|
||||
func TestSaveHeader(t *testing.T) {
|
||||
// save headers then fetch a random element
|
||||
|
||||
db, _, hdrs := saveRandomHeaders(t)
|
||||
|
||||
headerTable := database.NewTable(db, HEADER)
|
||||
// check that each header was saved
|
||||
for _, hdr := range hdrs {
|
||||
index := uint32ToBytes(hdr.Index)
|
||||
ok, err := headerTable.Has(index)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
// Clean up
|
||||
os.RemoveAll(database.DbDir)
|
||||
}
|
||||
|
||||
func TestSaveBlock(t *testing.T) {
|
||||
|
||||
// Init databases
|
||||
db, err := database.New("temp.test")
|
||||
assert.Nil(t, err)
|
||||
|
||||
cdb := &Chaindb{db}
|
||||
|
||||
// Construct block0 and block1
|
||||
block0, block1 := twoBlocksLinked(t)
|
||||
|
||||
// Save genesis header
|
||||
err = cdb.saveHeader(&block0.BlockBase)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Save genesis block
|
||||
err = cdb.saveBlock(block0, true)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Test genesis block saved
|
||||
testBlockWasSaved(t, cdb, block0)
|
||||
|
||||
// Save block1 header
|
||||
err = cdb.saveHeader(&block1.BlockBase)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Save block1
|
||||
err = cdb.saveBlock(block1, false)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Test block1 was saved
|
||||
testBlockWasSaved(t, cdb, block1)
|
||||
|
||||
// Clean up
|
||||
os.RemoveAll(database.DbDir)
|
||||
}
|
||||
|
||||
func testBlockWasSaved(t *testing.T, cdb *Chaindb, block payload.Block) {
|
||||
// Fetch last block from database
|
||||
lastBlock, err := cdb.GetLastBlock()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Get byte representation of last block from database
|
||||
byts, err := lastBlock.Bytes()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Get byte representation of block that we saved
|
||||
blockBytes, err := block.Bytes()
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Should be equal
|
||||
assert.True(t, bytes.Equal(byts, blockBytes))
|
||||
}
|
||||
|
||||
func randomHeaders(t *testing.T) []*payload.BlockBase {
|
||||
assert := assert.New(t)
|
||||
hdrsMsg, err := payload.NewHeadersMessage()
|
||||
assert.Nil(err)
|
||||
|
||||
for i := 0; i < 2000; i++ {
|
||||
err = hdrsMsg.AddHeader(randomBlockBase(t))
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
return hdrsMsg.Headers
|
||||
}
|
||||
|
||||
func randomBlockBase(t *testing.T) *payload.BlockBase {
|
||||
|
||||
base := &payload.BlockBase{
|
||||
Version: r.Uint32(),
|
||||
PrevHash: randUint256(t),
|
||||
MerkleRoot: randUint256(t),
|
||||
Timestamp: r.Uint32(),
|
||||
Index: r.Uint32(),
|
||||
ConsensusData: r.Uint64(),
|
||||
NextConsensus: randUint160(t),
|
||||
Witness: transaction.Witness{
|
||||
InvocationScript: []byte{0, 1, 2, 34, 56},
|
||||
VerificationScript: []byte{0, 12, 3, 45, 66},
|
||||
},
|
||||
Hash: randUint256(t),
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
func randomTxs(t *testing.T) []transaction.Transactioner {
|
||||
|
||||
var txs []transaction.Transactioner
|
||||
for i := 0; i < 10; i++ {
|
||||
tx := transaction.NewContract(0)
|
||||
tx.AddInput(transaction.NewInput(randUint256(t), uint16(r.Int())))
|
||||
tx.AddOutput(transaction.NewOutput(randUint256(t), r.Int63(), randUint160(t)))
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func saveRandomHeaders(t *testing.T) (database.Database, *Chaindb, []*payload.BlockBase) {
|
||||
db, err := database.New("temp.test")
|
||||
assert.Nil(t, err)
|
||||
|
||||
cdb := &Chaindb{db}
|
||||
|
||||
hdrs := randomHeaders(t)
|
||||
|
||||
err = cdb.saveHeaders(hdrs)
|
||||
assert.Nil(t, err)
|
||||
return db, cdb, hdrs
|
||||
}
|
||||
|
||||
func randUint256(t *testing.T) util.Uint256 {
|
||||
slice := make([]byte, 32)
|
||||
_, err := r.Read(slice)
|
||||
u, err := util.Uint256DecodeBytes(slice)
|
||||
assert.Nil(t, err)
|
||||
return u
|
||||
}
|
||||
func randUint160(t *testing.T) util.Uint160 {
|
||||
slice := make([]byte, 20)
|
||||
_, err := r.Read(slice)
|
||||
u, err := util.Uint160DecodeBytes(slice)
|
||||
assert.Nil(t, err)
|
||||
return u
|
||||
}
|
||||
|
||||
// twoBlocksLinked will return two blocks, the second block spends from the utxos in the first
|
||||
func twoBlocksLinked(t *testing.T) (payload.Block, payload.Block) {
|
||||
genesisBase := randomBlockBase(t)
|
||||
genesisTxs := randomTxs(t)
|
||||
genesisBlock := payload.Block{BlockBase: *genesisBase, Txs: genesisTxs}
|
||||
|
||||
var txs []transaction.Transactioner
|
||||
|
||||
// Form transactions that spend from the genesis block
|
||||
for _, tx := range genesisTxs {
|
||||
txHash, err := tx.ID()
|
||||
assert.Nil(t, err)
|
||||
newTx := transaction.NewContract(0)
|
||||
newTx.AddInput(transaction.NewInput(txHash, 0))
|
||||
newTx.AddOutput(transaction.NewOutput(randUint256(t), r.Int63(), randUint160(t)))
|
||||
txs = append(txs, newTx)
|
||||
}
|
||||
|
||||
nextBase := randomBlockBase(t)
|
||||
nextBlock := payload.Block{BlockBase: *nextBase, Txs: txs}
|
||||
|
||||
return genesisBlock, nextBlock
|
||||
}
|
19
_pkg.dev/chain/errors.go
Normal file
|
@ -0,0 +1,19 @@
|
|||
package chain
|
||||
|
||||
// ValidationError occurs when verification of the object fails
|
||||
type ValidationError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (v ValidationError) Error() string {
|
||||
return v.msg
|
||||
}
|
||||
|
||||
// DatabaseError occurs when the chain fails to save the object in the database
|
||||
type DatabaseError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (d DatabaseError) Error() string {
|
||||
return d.msg
|
||||
}
|
44
_pkg.dev/chaincfg/chaincfg.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package chaincfg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
// Params are the parameters needed to setup the network
|
||||
type Params struct {
|
||||
GenesisBlock payload.Block
|
||||
}
|
||||
|
||||
//NetParams returns the parameters for the chosen network magic
|
||||
func NetParams(magic protocol.Magic) (Params, error) {
|
||||
switch magic {
|
||||
case protocol.MainNet:
|
||||
return mainnet()
|
||||
default:
|
||||
return mainnet()
|
||||
}
|
||||
}
|
||||
|
||||
//Mainnet returns the parameters needed for mainnet
|
||||
func mainnet() (Params, error) {
|
||||
rawHex := "000000000000000000000000000000000000000000000000000000000000000000000000f41bc036e39b0d6b0579c851c6fde83af802fa4e57bec0bc3365eae3abf43f8065fc8857000000001dac2b7c0000000059e75d652b5d3827bf04c165bbe9ef95cca4bf55010001510400001dac2b7c00000000400000455b7b226c616e67223a227a682d434e222c226e616d65223a22e5b08fe89a81e882a1227d2c7b226c616e67223a22656e222c226e616d65223a22416e745368617265227d5d0000c16ff28623000000da1745e9b549bd0bfa1a569971c77eba30cd5a4b00000000400001445b7b226c616e67223a227a682d434e222c226e616d65223a22e5b08fe89a81e5b881227d2c7b226c616e67223a22656e222c226e616d65223a22416e74436f696e227d5d0000c16ff286230008009f7fd096d37ed2c0e3f7f0cfc924beef4ffceb680000000001000000019b7cffdaa674beae0f930ebe6085af9093e5fe56b34a5c220ccdcf6efc336fc50000c16ff28623005fa99d93303775fe50ca119c327759313eccfa1c01000151"
|
||||
rawBytes, err := hex.DecodeString(rawHex)
|
||||
if err != nil {
|
||||
return Params{}, err
|
||||
}
|
||||
reader := bytes.NewReader(rawBytes)
|
||||
|
||||
block := payload.Block{}
|
||||
err = block.Decode(reader)
|
||||
if err != nil {
|
||||
return Params{}, err
|
||||
}
|
||||
|
||||
return Params{
|
||||
GenesisBlock: block,
|
||||
}, nil
|
||||
}
|
13
_pkg.dev/chaincfg/chaincfg_test.go
Normal file
|
@ -0,0 +1,13 @@
|
|||
package chaincfg
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMainnet(t *testing.T) {
|
||||
p, err := mainnet()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, p.GenesisBlock.Hash.ReverseString(), "d42561e3d30e15be6400b6df2f328e02d2bf6354c41dce433bc57687c82144bf")
|
||||
}
|
25
_pkg.dev/connmgr/config.go
Executable file
|
@ -0,0 +1,25 @@
|
|||
package connmgr
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// Config contains all methods which will be set by the caller to set up the connection manager.
|
||||
type Config struct {
|
||||
// GetAddress will return a single address for the connection manager to connect to
|
||||
// This will be the source of addresses for the connection manager
|
||||
GetAddress func() (string, error)
|
||||
|
||||
// OnConnection is called by the connection manager when we successfully connect to a peer
|
||||
// The caller should ideally inform the address manager that we have connected to this address in this function
|
||||
OnConnection func(conn net.Conn, addr string)
|
||||
|
||||
// OnAccept will take an established connection
|
||||
OnAccept func(net.Conn)
|
||||
|
||||
// AddressPort is the address port of the local node in the format "address:port"
|
||||
AddressPort string
|
||||
|
||||
// DialTimeout is the amount of time to wait, before we can disconnect a pending dialed connection
|
||||
DialTimeout int
|
||||
}
|
245
_pkg.dev/connmgr/connmgr.go
Executable file
|
@ -0,0 +1,245 @@
|
|||
package connmgr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// maxOutboundConn is the maximum number of active peers
|
||||
// that the connection manager will try to have
|
||||
maxOutboundConn = 10
|
||||
|
||||
// maxRetries is the maximum amount of successive retries that
|
||||
// we can have before we stop dialing that peer
|
||||
maxRetries = uint8(5)
|
||||
)
|
||||
|
||||
// Connmgr manages pending/active/failed connections
|
||||
type Connmgr struct {
|
||||
config Config
|
||||
PendingList map[string]*Request
|
||||
ConnectedList map[string]*Request
|
||||
actionch chan func()
|
||||
}
|
||||
|
||||
//New creates a new connection manager
|
||||
func New(cfg Config) (*Connmgr, error) {
|
||||
listener, err := net.Listen("tcp", cfg.AddressPort)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cnnmgr := &Connmgr{
|
||||
cfg,
|
||||
make(map[string]*Request),
|
||||
make(map[string]*Request),
|
||||
make(chan func(), 300),
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
listener.Close()
|
||||
}()
|
||||
|
||||
for {
|
||||
|
||||
conn, err := listener.Accept()
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
go cfg.OnAccept(conn)
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
return cnnmgr, nil
|
||||
}
|
||||
|
||||
// NewRequest will make a new connection; it gets the address from the address func in the config
|
||||
// Then dials it and assigns it to pending
|
||||
func (c *Connmgr) NewRequest() error {
|
||||
|
||||
// Fetch address
|
||||
addr, err := c.config.GetAddress()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting address " + err.Error())
|
||||
}
|
||||
|
||||
r := &Request{
|
||||
Addr: addr,
|
||||
}
|
||||
return c.Connect(r)
|
||||
}
|
||||
|
||||
// Connect will dial the address in the Request
|
||||
// Updating the request object depending on the outcome
|
||||
func (c *Connmgr) Connect(r *Request) error {
|
||||
|
||||
r.Retries++
|
||||
|
||||
conn, err := c.dial(r.Addr)
|
||||
if err != nil {
|
||||
c.failed(r)
|
||||
return err
|
||||
}
|
||||
|
||||
r.Conn = conn
|
||||
r.Inbound = true
|
||||
|
||||
// r.Permanent is set by the address manager/caller. default is false
|
||||
// The permanent connections will be the ones that are hardcoded, e.g seed3.ngd.network
|
||||
// or are reliable. The connmgr will be more lenient to permanent addresses as they have
|
||||
// a track record or reputation of being reliable.
|
||||
|
||||
return c.connected(r)
|
||||
}
|
||||
|
||||
//Disconnect will remove the request from the connected/pending list and close the connection
|
||||
func (c *Connmgr) Disconnect(addr string) {
|
||||
|
||||
var r *Request
|
||||
|
||||
// fetch from connected list
|
||||
r, ok := c.ConnectedList[addr]
|
||||
if !ok {
|
||||
// If not in connected, check pending
|
||||
r, _ = c.PendingList[addr]
|
||||
}
|
||||
|
||||
c.disconnected(r)
|
||||
|
||||
}
|
||||
|
||||
// Dial is used to dial up connections given the address and IP in the form address:port
|
||||
func (c *Connmgr) dial(addr string) (net.Conn, error) {
|
||||
dialTimeout := 1 * time.Second
|
||||
conn, err := net.DialTimeout("tcp", addr, dialTimeout)
|
||||
if err != nil {
|
||||
if !isConnected() {
|
||||
return nil, errors.New("Fatal Error: You do not seem to be connected to the internet")
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
func (c *Connmgr) failed(r *Request) {
|
||||
|
||||
c.actionch <- func() {
|
||||
// priority to check if it is permanent or inbound
|
||||
// if so then these peers are valuable in NEO and so we will just retry another time
|
||||
if r.Inbound || r.Permanent {
|
||||
multiplier := time.Duration(r.Retries * 10)
|
||||
time.AfterFunc(multiplier*time.Second,
|
||||
func() {
|
||||
c.Connect(r)
|
||||
},
|
||||
)
|
||||
// if not then we should check if this request has had maxRetries
|
||||
// if it has then get a new address
|
||||
// if not then call Connect on it again
|
||||
} else if r.Retries > maxRetries {
|
||||
if c.config.GetAddress != nil {
|
||||
go c.NewRequest()
|
||||
}
|
||||
} else {
|
||||
go c.Connect(r)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Disconnected is called when a peer disconnects.
|
||||
// we take the addr from peer, which is also its key in the map
|
||||
// and we use it to remove it from the connectedList
|
||||
func (c *Connmgr) disconnected(r *Request) error {
|
||||
|
||||
if r == nil {
|
||||
// if object is nil, we return nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// if for some reason the underlying connection is not closed, close it
|
||||
err := r.Conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove from any pending/connected list
|
||||
delete(c.PendingList, r.Addr)
|
||||
delete(c.ConnectedList, r.Addr)
|
||||
|
||||
// If permanent,then lets retry
|
||||
if r.Permanent {
|
||||
return c.Connect(r)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//Connected is called when the connection manager makes a successful connection.
|
||||
func (c *Connmgr) connected(r *Request) error {
|
||||
|
||||
// This should not be the case, since we connected
|
||||
if r == nil {
|
||||
return errors.New("request object as nil inside of the connected function")
|
||||
}
|
||||
|
||||
// reset retries to 0
|
||||
r.Retries = 0
|
||||
|
||||
// add to connectedList
|
||||
c.ConnectedList[r.Addr] = r
|
||||
|
||||
// remove from pending if it was there
|
||||
delete(c.PendingList, r.Addr)
|
||||
|
||||
if c.config.OnConnection != nil {
|
||||
c.config.OnConnection(r.Conn, r.Addr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pending is synchronous, we do not want to continue with logic
|
||||
// until we are certain it has been added to the pendingList
|
||||
func (c *Connmgr) pending(r *Request) error {
|
||||
|
||||
if r == nil {
|
||||
return errors.New("request object is nil")
|
||||
}
|
||||
|
||||
c.PendingList[r.Addr] = r
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run will start the connection manager
|
||||
func (c *Connmgr) Run() error {
|
||||
fmt.Println("Connection manager started")
|
||||
go c.loop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Connmgr) loop() {
|
||||
for {
|
||||
select {
|
||||
case f := <-c.actionch:
|
||||
f()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// https://stackoverflow.com/questions/50056144/check-for-internet-connection-from-application
|
||||
func isConnected() (ok bool) {
|
||||
_, err := http.Get("http://clients3.google.com/generate_204")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
107
_pkg.dev/connmgr/connmgr_test.go
Executable file
|
@ -0,0 +1,107 @@
|
|||
package connmgr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDial(t *testing.T) {
|
||||
cfg := Config{
|
||||
GetAddress: nil,
|
||||
OnConnection: nil,
|
||||
OnAccept: nil,
|
||||
AddressPort: "",
|
||||
DialTimeout: 0,
|
||||
}
|
||||
|
||||
cm := New(cfg)
|
||||
err := cm.Run()
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
ipport := "google.com:80" // google unlikely to go offline, a better approach to test Dialing is welcome.
|
||||
|
||||
conn, err := cm.dial(ipport)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.NotEqual(t, nil, conn)
|
||||
}
|
||||
func TestConnect(t *testing.T) {
|
||||
cfg := Config{
|
||||
GetAddress: nil,
|
||||
OnConnection: nil,
|
||||
OnAccept: nil,
|
||||
AddressPort: "",
|
||||
DialTimeout: 0,
|
||||
}
|
||||
|
||||
cm := New(cfg)
|
||||
cm.Run()
|
||||
|
||||
ipport := "google.com:80"
|
||||
|
||||
r := Request{Addr: ipport}
|
||||
|
||||
err := cm.Connect(&r)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(cm.ConnectedList))
|
||||
|
||||
}
|
||||
func TestNewRequest(t *testing.T) {
|
||||
|
||||
address := "google.com:80"
|
||||
|
||||
var getAddr = func() (string, error) {
|
||||
return address, nil
|
||||
}
|
||||
|
||||
cfg := Config{
|
||||
GetAddress: getAddr,
|
||||
OnConnection: nil,
|
||||
OnAccept: nil,
|
||||
AddressPort: "",
|
||||
DialTimeout: 0,
|
||||
}
|
||||
|
||||
cm := New(cfg)
|
||||
|
||||
cm.Run()
|
||||
|
||||
cm.NewRequest()
|
||||
|
||||
if _, ok := cm.ConnectedList[address]; ok {
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, 1, len(cm.ConnectedList))
|
||||
return
|
||||
}
|
||||
|
||||
assert.Fail(t, "Could not find the address in the connected lists")
|
||||
|
||||
}
|
||||
func TestDisconnect(t *testing.T) {
|
||||
|
||||
address := "google.com:80"
|
||||
|
||||
var getAddr = func() (string, error) {
|
||||
return address, nil
|
||||
}
|
||||
|
||||
cfg := Config{
|
||||
GetAddress: getAddr,
|
||||
OnConnection: nil,
|
||||
OnAccept: nil,
|
||||
AddressPort: "",
|
||||
DialTimeout: 0,
|
||||
}
|
||||
|
||||
cm := New(cfg)
|
||||
|
||||
cm.Run()
|
||||
|
||||
cm.NewRequest()
|
||||
|
||||
cm.Disconnect(address)
|
||||
|
||||
assert.Equal(t, 0, len(cm.ConnectedList))
|
||||
|
||||
}
|
22
_pkg.dev/connmgr/readme.md
Executable file
@ -0,0 +1,22 @@
# Package - Connection Manager

## Responsibility

- Manages the active, failed and pending connections for the node.

## Features

- Takes a Request, dials it and logs information based on the connectivity.

- Retries failed connections.

- Removable address source. The connection manager does not manage addresses, only connections.


## Usage

The following methods are exposed from the Connection manager:

- Connect(r *Request) : This takes a Request object and connects to it. It follows the same logic as NewRequest(), however instead of getting the address from the data source given upon initialisation, you directly feed the address you want to connect to.

- Disconnect(addrport string) : Given an address:port, this will disconnect it, close the connection and remove it from the connected and pending list, if it was there.
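Below is a minimal usage sketch (not part of this commit). The import path, the seed address and the ports are assumptions made for illustration; the `Config` fields and the `New`/`Run`/`NewRequest`/`Disconnect` signatures follow the connmgr code in this change.

```go
package main

import (
	"fmt"
	"net"

	"github.com/CityOfZion/neo-go/pkg/connmgr" // assumed import path
)

func main() {
	cfg := connmgr.Config{
		// Hypothetical address source; a real node would ask its address manager.
		GetAddress: func() (string, error) { return "seed1.example.org:10333", nil },
		OnConnection: func(conn net.Conn, addr string) {
			fmt.Println("connected to", addr)
		},
		OnAccept: func(conn net.Conn) {
			fmt.Println("accepted connection from", conn.RemoteAddr())
		},
		AddressPort: ":20333", // local listener address, assumed port
		DialTimeout: 5,
	}

	cm, err := connmgr.New(cfg) // also starts listening on AddressPort
	if err != nil {
		fmt.Println("could not start connection manager:", err)
		return
	}

	cm.Run() // start the internal action loop

	// Dial the next address returned by GetAddress.
	if err := cm.NewRequest(); err != nil {
		fmt.Println("dial failed:", err)
	}

	// Later, drop the peer again.
	cm.Disconnect("seed1.example.org:10333")
}
```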
15
_pkg.dev/connmgr/request.go
Executable file
|
@ -0,0 +1,15 @@
|
|||
package connmgr
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// Request is a layer on top of connection and allows us to add metadata to the net.Conn
|
||||
// that the connection manager can use to determine whether to retry and other useful heuristics
|
||||
type Request struct {
|
||||
Conn net.Conn
|
||||
Addr string
|
||||
Permanent bool
|
||||
Inbound bool
|
||||
Retries uint8 // should not be trying more than 255 tries
|
||||
}
|
93
_pkg.dev/crypto/aes/aes256.go
Executable file
|
@ -0,0 +1,93 @@
|
|||
package aes
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
// Encrypt encrypts the source with the given key.
|
||||
func Encrypt(src, key []byte) ([]byte, error) {
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ecb := newECBEncrypter(block)
|
||||
out := make([]byte, len(src))
|
||||
ecb.CryptBlocks(out, src)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Decrypt decrypts the encrypted source with the given key.
|
||||
func Decrypt(crypted, key []byte) ([]byte, error) {
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockMode := newECBDecrypter(block)
|
||||
out := make([]byte, len(crypted))
|
||||
blockMode.CryptBlocks(out, crypted)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type ecb struct {
|
||||
b cipher.Block
|
||||
blockSize int
|
||||
}
|
||||
|
||||
func newECB(b cipher.Block) *ecb {
|
||||
return &ecb{
|
||||
b: b,
|
||||
blockSize: b.BlockSize(),
|
||||
}
|
||||
}
|
||||
|
||||
type ecbEncrypter ecb
|
||||
|
||||
func newECBEncrypter(b cipher.Block) cipher.BlockMode {
|
||||
return (*ecbEncrypter)(newECB(b))
|
||||
}
|
||||
|
||||
func (ecb *ecbEncrypter) BlockSize() int {
|
||||
return ecb.blockSize
|
||||
}
|
||||
|
||||
func (ecb *ecbEncrypter) CryptBlocks(dst, src []byte) {
|
||||
if len(src)%ecb.blockSize != 0 {
|
||||
panic("crypto/cipher: input not full blocks")
|
||||
}
|
||||
if len(dst) < len(src) {
|
||||
panic("crypto/cipher: output smaller than input")
|
||||
}
|
||||
for len(src) > 0 {
|
||||
ecb.b.Encrypt(dst, src[:ecb.blockSize])
|
||||
src = src[ecb.blockSize:]
|
||||
dst = dst[ecb.blockSize:]
|
||||
}
|
||||
}
|
||||
|
||||
type ecbDecrypter ecb
|
||||
|
||||
func newECBDecrypter(b cipher.Block) cipher.BlockMode {
|
||||
return (*ecbDecrypter)(newECB(b))
|
||||
}
|
||||
|
||||
func (ecb ecbDecrypter) BlockSize() int {
|
||||
return ecb.blockSize
|
||||
}
|
||||
|
||||
func (ecb *ecbDecrypter) CryptBlocks(dst, src []byte) {
|
||||
if len(src)%ecb.blockSize != 0 {
|
||||
panic("crypto/cipher: input not full blocks")
|
||||
}
|
||||
if len(dst) < len(src) {
|
||||
panic("crypto/cipher: output smaller than input")
|
||||
}
|
||||
for len(src) > 0 {
|
||||
ecb.b.Decrypt(dst, src[:ecb.blockSize])
|
||||
src = src[ecb.blockSize:]
|
||||
dst = dst[ecb.blockSize:]
|
||||
}
|
||||
}
|
126
_pkg.dev/crypto/base58/base58.go
Executable file
|
@ -0,0 +1,126 @@
|
|||
package base58
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
)
|
||||
|
||||
const prefix rune = '1'
|
||||
|
||||
var decodeMap = map[rune]int64{
|
||||
'1': 0, '2': 1, '3': 2, '4': 3, '5': 4,
|
||||
'6': 5, '7': 6, '8': 7, '9': 8, 'A': 9,
|
||||
'B': 10, 'C': 11, 'D': 12, 'E': 13, 'F': 14,
|
||||
'G': 15, 'H': 16, 'J': 17, 'K': 18, 'L': 19,
|
||||
'M': 20, 'N': 21, 'P': 22, 'Q': 23, 'R': 24,
|
||||
'S': 25, 'T': 26, 'U': 27, 'V': 28, 'W': 29,
|
||||
'X': 30, 'Y': 31, 'Z': 32, 'a': 33, 'b': 34,
|
||||
'c': 35, 'd': 36, 'e': 37, 'f': 38, 'g': 39,
|
||||
'h': 40, 'i': 41, 'j': 42, 'k': 43, 'm': 44,
|
||||
'n': 45, 'o': 46, 'p': 47, 'q': 48, 'r': 49,
|
||||
's': 50, 't': 51, 'u': 52, 'v': 53, 'w': 54,
|
||||
'x': 55, 'y': 56, 'z': 57,
|
||||
}
|
||||
|
||||
// Decode decodes the base58 encoded string.
|
||||
func Decode(s string) ([]byte, error) {
|
||||
var (
|
||||
startIndex = 0
|
||||
zero = 0
|
||||
)
|
||||
for i, c := range s {
|
||||
if c == prefix {
|
||||
zero++
|
||||
} else {
|
||||
startIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
n = big.NewInt(0)
|
||||
div = big.NewInt(58)
|
||||
)
|
||||
for _, c := range s[startIndex:] {
|
||||
charIndex, ok := decodeMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(
|
||||
"invalid character '%c' when decoding this base58 string: '%s'", c, s,
|
||||
)
|
||||
}
|
||||
n.Add(n.Mul(n, div), big.NewInt(charIndex))
|
||||
}
|
||||
|
||||
out := n.Bytes()
|
||||
buf := make([]byte, (zero + len(out)))
|
||||
copy(buf[zero:], out[:])
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Encode encodes a byte slice to be a base58 encoded string.
|
||||
func Encode(bytes []byte) string {
|
||||
var (
|
||||
lookupTable = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
|
||||
x = new(big.Int).SetBytes(bytes)
|
||||
r = new(big.Int)
|
||||
m = big.NewInt(58)
|
||||
zero = big.NewInt(0)
|
||||
encoded string
|
||||
)
|
||||
|
||||
for x.Cmp(zero) > 0 {
|
||||
x.QuoRem(x, m, r)
|
||||
encoded = string(lookupTable[r.Int64()]) + encoded
|
||||
}
|
||||
|
||||
return encoded
|
||||
}
|
||||
|
||||
// CheckDecode decodes the given string.
|
||||
func CheckDecode(s string) (b []byte, err error) {
|
||||
b, err = Decode(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] != '1' {
|
||||
break
|
||||
}
|
||||
b = append([]byte{0x00}, b...)
|
||||
}
|
||||
|
||||
if len(b) < 5 {
|
||||
return nil, fmt.Errorf("Invalid base-58 check string: missing checksum")
|
||||
}
|
||||
|
||||
hash, err := hash.DoubleSha256(b[:len(b)-4])
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not double sha256 data")
|
||||
}
|
||||
|
||||
if bytes.Compare(hash[0:4], b[len(b)-4:]) != 0 {
|
||||
return nil, fmt.Errorf("Invalid base-58 check string: invalid checksum")
|
||||
}
|
||||
|
||||
// Strip the 4 byte long hash.
|
||||
b = b[:len(b)-4]
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// CheckEncode encodes b into a base-58 check encoded string.
|
||||
func CheckEncode(b []byte) (string, error) {
|
||||
hash, err := hash.DoubleSha256(b)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Could not double sha256 data")
|
||||
}
|
||||
b = append(b, hash[0:4]...)
|
||||
|
||||
return Encode(b), nil
|
||||
}
|
32
_pkg.dev/crypto/base58/base58_test.go
Executable file
|
@ -0,0 +1,32 @@
|
|||
package base58
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDecode(t *testing.T) {
|
||||
input := "1F1tAaz5x1HUXrCNLbtMDqcw6o5GNn4xqX"
|
||||
|
||||
data, err := Decode(input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := "0099bc78ba577a95a11f1a344d4d2ae55f2f857b989ea5e5e2"
|
||||
actual := hex.EncodeToString(data)
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
func TestEncode(t *testing.T) {
|
||||
input := "0099bc78ba577a95a11f1a344d4d2ae55f2f857b989ea5e5e2"
|
||||
|
||||
inputBytes, _ := hex.DecodeString(input)
|
||||
|
||||
data := Encode(inputBytes)
|
||||
|
||||
expected := "F1tAaz5x1HUXrCNLbtMDqcw6o5GNn4xqX" // Removed the 1 as it is not checkEncoding
|
||||
actual := data
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
18
_pkg.dev/crypto/elliptic/Readme.md
Executable file
@ -0,0 +1,18 @@
## Package - Elliptic

### Why

The curve and arithmetic functions have been modularised, so that curves can be swapped in and out without affecting the functionality.

The modular arithmetic used is not specialised for a specific curve.

In order to use this package, you must declare an elliptic curve struct and then set the curve.

Example:

`

curve = NewEllipticCurve(Secp256k1)

`
If no curve is set, the default curve is the r1 curve used for NEO. The tests are done using the k1 curve, so in the elliptic_test.go file, the curve is changed accordingly.
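Below is a small, hedged sketch of that usage (not part of this commit). It follows the exported API in curves.go and elliptic.go from this change; the import path is an assumption.

```go
package main

import (
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/elliptic" // assumed import path
)

func main() {
	// The tests use the k1 curve; Secp256r1 (the NEO curve) is the default choice.
	curve := elliptic.NewEllipticCurve(elliptic.Secp256k1)

	// The generator G must satisfy y^2 = x^3 + a*x + b (mod p).
	fmt.Println("generator on curve:", curve.IsOnCurve(curve.G.X, curve.G.Y))

	// Point doubling: 2G = G + G.
	x2, y2 := curve.Add(curve.G.X, curve.G.Y, curve.G.X, curve.G.Y)
	fmt.Printf("2G = (%x, %x)\n", x2, y2)
}
```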
64
_pkg.dev/crypto/elliptic/curves.go
Executable file
|
@ -0,0 +1,64 @@
|
|||
package elliptic
|
||||
|
||||
/*
|
||||
This file was originally made by vsergeev.
|
||||
|
||||
Modifications have been made under the MIT license.
|
||||
License: MIT
|
||||
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var curve Curve
|
||||
|
||||
type curveType string
|
||||
|
||||
const (
|
||||
// Secp256r1 curve type
|
||||
Secp256r1 curveType = "Secp256r1"
|
||||
// Secp256k1 curve type
|
||||
Secp256k1 curveType = "Secp256k1"
|
||||
)
|
||||
|
||||
// SetCurveSecp256r1 Will set the curve parameters to match Secp256r1
|
||||
func (ChosenCurve *Curve) SetCurveSecp256r1() {
|
||||
ChosenCurve.P, _ = new(big.Int).SetString("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF", 16) //Q
|
||||
ChosenCurve.A, _ = new(big.Int).SetString("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC", 16)
|
||||
ChosenCurve.B, _ = new(big.Int).SetString("5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B", 16)
|
||||
ChosenCurve.G.X, _ = new(big.Int).SetString("6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296", 16)
|
||||
ChosenCurve.G.Y, _ = new(big.Int).SetString("4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5", 16)
|
||||
ChosenCurve.N, _ = new(big.Int).SetString("FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551", 16)
|
||||
ChosenCurve.H, _ = new(big.Int).SetString("01", 16)
|
||||
ChosenCurve.Name = "Secp256r1"
|
||||
}
|
||||
|
||||
// SetCurveSecp256k1 Will set the curve parameters to match Secp256k1
|
||||
func (ChosenCurve *Curve) SetCurveSecp256k1() {
|
||||
ChosenCurve.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
|
||||
ChosenCurve.A, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000000", 16)
|
||||
ChosenCurve.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
|
||||
ChosenCurve.G.X, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
|
||||
ChosenCurve.G.Y, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
|
||||
ChosenCurve.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
|
||||
ChosenCurve.H, _ = new(big.Int).SetString("01", 16)
|
||||
ChosenCurve.Name = "Secp256k1"
|
||||
}
|
||||
|
||||
//NewEllipticCurve will instantiate a new EllipticCurve
|
||||
//Defaults to secp256r1
|
||||
func NewEllipticCurve(ct curveType) Curve {
|
||||
var curve Curve
|
||||
switch ct {
|
||||
case Secp256k1:
|
||||
curve.SetCurveSecp256k1()
|
||||
case Secp256r1:
|
||||
curve.SetCurveSecp256r1()
|
||||
default:
|
||||
curve.SetCurveSecp256r1()
|
||||
}
|
||||
return curve
|
||||
}
|
319
_pkg.dev/crypto/elliptic/elliptic.go
Executable file
|
@ -0,0 +1,319 @@
|
|||
/*
|
||||
This file has been modified under the MIT license.
|
||||
Original: https://github.com/vsergeev/btckeygenie
|
||||
*/
|
||||
|
||||
package elliptic
|
||||
|
||||
import (
|
||||
nativeelliptic "crypto/elliptic"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Point represents a point on an EllipticCurve.
|
||||
type Point struct {
|
||||
X *big.Int
|
||||
Y *big.Int
|
||||
}
|
||||
|
||||
// Curve represents the parameters of a short Weierstrass equation elliptic curve.
|
||||
/* y**2 = x**3 + a*x + b % p */
|
||||
type Curve struct {
|
||||
A *big.Int
|
||||
B *big.Int
|
||||
P *big.Int
|
||||
G Point
|
||||
N *big.Int
|
||||
H *big.Int
|
||||
Name string
|
||||
}
|
||||
|
||||
// dump dumps the bytes of a point for debugging.
|
||||
func (p *Point) dump() {
|
||||
fmt.Print(p.format())
|
||||
}
|
||||
|
||||
// format formats the bytes of a point for debugging.
|
||||
func (p *Point) format() string {
|
||||
if p.X == nil && p.Y == nil {
|
||||
return "(inf,inf)"
|
||||
}
|
||||
return fmt.Sprintf("(%s,%s)", hex.EncodeToString(p.X.Bytes()), hex.EncodeToString(p.Y.Bytes()))
|
||||
}
|
||||
|
||||
// Params represents the parameters for the elliptic curve
|
||||
func (ec Curve) Params() *nativeelliptic.CurveParams {
|
||||
return &nativeelliptic.CurveParams{
|
||||
P: ec.P,
|
||||
N: ec.N,
|
||||
B: ec.B,
|
||||
Gx: ec.G.X,
|
||||
Gy: ec.G.Y,
|
||||
BitSize: 256,
|
||||
Name: ec.Name,
|
||||
}
|
||||
}
|
||||
|
||||
/*** Modular Arithmetic ***/
|
||||
|
||||
/* NOTE: Returning a new z each time below is very space inefficient, but the
|
||||
* alternate accumulator based design makes the point arithmetic functions look
|
||||
* absolutely hideous. I may still change this in the future. */
|
||||
|
||||
// addMod computes z = (x + y) % p.
|
||||
func addMod(x *big.Int, y *big.Int, p *big.Int) (z *big.Int) {
|
||||
z = new(big.Int).Add(x, y)
|
||||
z.Mod(z, p)
|
||||
return z
|
||||
}
|
||||
|
||||
// subMod computes z = (x - y) % p.
|
||||
func subMod(x *big.Int, y *big.Int, p *big.Int) (z *big.Int) {
|
||||
z = new(big.Int).Sub(x, y)
|
||||
z.Mod(z, p)
|
||||
return z
|
||||
}
|
||||
|
||||
// mulMod computes z = (x * y) % p.
|
||||
func mulMod(x *big.Int, y *big.Int, p *big.Int) (z *big.Int) {
|
||||
n := new(big.Int).Set(x)
|
||||
z = big.NewInt(0)
|
||||
|
||||
for i := 0; i < y.BitLen(); i++ {
|
||||
if y.Bit(i) == 1 {
|
||||
z = addMod(z, n, p)
|
||||
}
|
||||
n = addMod(n, n, p)
|
||||
}
|
||||
|
||||
return z
|
||||
}
|
||||
|
||||
// invMod computes z = (1/x) % p.
|
||||
func invMod(x *big.Int, p *big.Int) (z *big.Int) {
|
||||
z = new(big.Int).ModInverse(x, p)
|
||||
return z
|
||||
}
|
||||
|
||||
// expMod computes z = (x^e) % p.
|
||||
func expMod(x *big.Int, y *big.Int, p *big.Int) (z *big.Int) {
|
||||
z = new(big.Int).Exp(x, y, p)
|
||||
return z
|
||||
}
|
||||
|
||||
// sqrtMod computes z = sqrt(x) % p.
|
||||
func sqrtMod(x *big.Int, p *big.Int) (z *big.Int) {
|
||||
/* assert that p % 4 == 3 */
|
||||
if new(big.Int).Mod(p, big.NewInt(4)).Cmp(big.NewInt(3)) != 0 {
|
||||
panic("p is not equal to 3 mod 4!")
|
||||
}
|
||||
|
||||
/* z = sqrt(x) % p = x^((p+1)/4) % p */
|
||||
|
||||
/* e = (p+1)/4 */
|
||||
e := new(big.Int).Add(p, big.NewInt(1))
|
||||
e = e.Rsh(e, 2)
|
||||
|
||||
z = expMod(x, e, p)
|
||||
return z
|
||||
}
|
||||
|
||||
/*** Point Arithmetic on Curve ***/
|
||||
|
||||
// IsInfinity checks if point P is infinity on EllipticCurve ec.
|
||||
func (ec *Curve) IsInfinity(P Point) bool {
|
||||
/* We use (nil,nil) to represent O, the point at infinity. */
|
||||
|
||||
if P.X == nil && P.Y == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsOnCurve checks if point P is on EllipticCurve ec.
|
||||
func (ec Curve) IsOnCurve(P1, P2 *big.Int) bool {
|
||||
P := Point{P1, P2}
|
||||
if ec.IsInfinity(P) {
|
||||
return false
|
||||
}
|
||||
|
||||
/* y**2 = x**3 + a*x + b % p */
|
||||
lhs := mulMod(P.Y, P.Y, ec.P)
|
||||
rhs := addMod(
|
||||
addMod(
|
||||
expMod(P.X, big.NewInt(3), ec.P),
|
||||
mulMod(ec.A, P.X, ec.P), ec.P),
|
||||
ec.B, ec.P)
|
||||
|
||||
if lhs.Cmp(rhs) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Add computes R = P + Q on EllipticCurve ec.
|
||||
func (ec Curve) Add(P1, P2, Q1, Q2 *big.Int) (R1 *big.Int, R2 *big.Int) {
|
||||
/* See rules 1-5 on SEC1 pg.7 http://www.secg.org/collateral/sec1_final.pdf */
|
||||
P := Point{P1, P2}
|
||||
Q := Point{Q1, Q2}
|
||||
R := Point{}
|
||||
if ec.IsInfinity(P) && ec.IsInfinity(Q) {
|
||||
/* Rule #1 Identity */
|
||||
/* R = O + O = O */
|
||||
|
||||
R.X = nil
|
||||
R.Y = nil
|
||||
|
||||
} else if ec.IsInfinity(P) {
|
||||
/* Rule #2 Identity */
|
||||
/* R = O + Q = Q */
|
||||
|
||||
R.X = new(big.Int).Set(Q.X)
|
||||
R.Y = new(big.Int).Set(Q.Y)
|
||||
|
||||
} else if ec.IsInfinity(Q) {
|
||||
/* Rule #2 Identity */
|
||||
/* R = P + O = P */
|
||||
|
||||
R.X = new(big.Int).Set(P.X)
|
||||
R.Y = new(big.Int).Set(P.Y)
|
||||
|
||||
} else if P.X.Cmp(Q.X) == 0 && addMod(P.Y, Q.Y, ec.P).Sign() == 0 {
|
||||
/* Rule #3 Identity */
|
||||
/* R = (x,y) + (x,-y) = O */
|
||||
|
||||
R.X = nil
|
||||
R.Y = nil
|
||||
|
||||
} else if P.X.Cmp(Q.X) == 0 && P.Y.Cmp(Q.Y) == 0 && P.Y.Sign() != 0 {
|
||||
/* Rule #5 Point doubling */
|
||||
/* R = P + P */
|
||||
|
||||
/* Lambda = (3*P.X*P.X + a) / (2*P.Y) */
|
||||
num := addMod(
|
||||
mulMod(big.NewInt(3),
|
||||
mulMod(P.X, P.X, ec.P), ec.P),
|
||||
ec.A, ec.P)
|
||||
den := invMod(mulMod(big.NewInt(2), P.Y, ec.P), ec.P)
|
||||
lambda := mulMod(num, den, ec.P)
|
||||
|
||||
/* R.X = lambda*lambda - 2*P.X */
|
||||
R.X = subMod(
|
||||
mulMod(lambda, lambda, ec.P),
|
||||
mulMod(big.NewInt(2), P.X, ec.P),
|
||||
ec.P)
|
||||
/* R.Y = lambda*(P.X - R.X) - P.Y */
|
||||
R.Y = subMod(
|
||||
mulMod(lambda, subMod(P.X, R.X, ec.P), ec.P),
|
||||
P.Y, ec.P)
|
||||
|
||||
} else if P.X.Cmp(Q.X) != 0 {
|
||||
/* Rule #4 Point addition */
|
||||
/* R = P + Q */
|
||||
|
||||
/* Lambda = (Q.Y - P.Y) / (Q.X - P.X) */
|
||||
num := subMod(Q.Y, P.Y, ec.P)
|
||||
den := invMod(subMod(Q.X, P.X, ec.P), ec.P)
|
||||
lambda := mulMod(num, den, ec.P)
|
||||
|
||||
/* R.X = lambda*lambda - P.X - Q.X */
|
||||
R.X = subMod(
|
||||
subMod(
|
||||
mulMod(lambda, lambda, ec.P),
|
||||
P.X, ec.P),
|
||||
Q.X, ec.P)
|
||||
|
||||
/* R.Y = lambda*(P.X - R.X) - P.Y */
|
||||
R.Y = subMod(
|
||||
mulMod(lambda,
|
||||
subMod(P.X, R.X, ec.P), ec.P),
|
||||
P.Y, ec.P)
|
||||
} else {
|
||||
panic(fmt.Sprintf("Unsupported point addition: %v + %v", P.format(), Q.format()))
|
||||
}
|
||||
|
||||
return R.X, R.Y
|
||||
}
|
||||
|
||||
// ScalarMult computes Q = k * P on EllipticCurve ec.
|
||||
func (ec Curve) ScalarMult(P1, P2 *big.Int, l []byte) (Q1, Q2 *big.Int) {
|
||||
/* Note: this function is not constant time, due to the branching nature of
|
||||
* the underlying point Add() function. */
|
||||
|
||||
/* Montgomery Ladder Point Multiplication
|
||||
*
|
||||
* Implementation based on pseudocode here:
|
||||
* See https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#Montgomery_ladder */
|
||||
|
||||
P := Point{P1, P2}
|
||||
k := big.Int{}
|
||||
k.SetBytes(l)
|
||||
|
||||
var R0 Point
|
||||
var R1 Point
|
||||
|
||||
R0.X = nil
|
||||
R0.Y = nil
|
||||
R1.X = new(big.Int).Set(P.X)
|
||||
R1.Y = new(big.Int).Set(P.Y)
|
||||
|
||||
for i := ec.N.BitLen() - 1; i >= 0; i-- {
|
||||
if k.Bit(i) == 0 {
|
||||
R1.X, R1.Y = ec.Add(R0.X, R0.Y, R1.X, R1.Y)
|
||||
R0.X, R0.Y = ec.Add(R0.X, R0.Y, R0.X, R0.Y)
|
||||
} else {
|
||||
R0.X, R0.Y = ec.Add(R0.X, R0.Y, R1.X, R1.Y)
|
||||
R1.X, R1.Y = ec.Add(R1.X, R1.Y, R1.X, R1.Y)
|
||||
}
|
||||
}
|
||||
|
||||
return R0.X, R0.Y
|
||||
}
|
||||
|
||||
// ScalarBaseMult computes Q = k * G on EllipticCurve ec.
|
||||
func (ec Curve) ScalarBaseMult(k []byte) (Q1, Q2 *big.Int) {
|
||||
|
||||
return ec.ScalarMult(ec.G.X, ec.G.Y, k)
|
||||
}
|
||||
|
||||
// Decompress decompresses coordinate x and ylsb (y's least significant bit) into a Point P on EllipticCurve ec.
|
||||
func (ec *Curve) Decompress(x *big.Int, ylsb uint) (P Point, err error) {
|
||||
/* y**2 = x**3 + a*x + b % p */
|
||||
rhs := addMod(
|
||||
addMod(
|
||||
expMod(x, big.NewInt(3), ec.P),
|
||||
mulMod(ec.A, x, ec.P),
|
||||
ec.P),
|
||||
ec.B, ec.P)
|
||||
|
||||
/* y = sqrt(rhs) % p */
|
||||
y := sqrtMod(rhs, ec.P)
|
||||
|
||||
/* Use -y if opposite lsb is required */
|
||||
if y.Bit(0) != (ylsb & 0x1) {
|
||||
y = subMod(big.NewInt(0), y, ec.P)
|
||||
}
|
||||
|
||||
P.X = x
|
||||
P.Y = y
|
||||
|
||||
if !ec.IsOnCurve(P.X, P.Y) {
|
||||
return P, errors.New("compressed (x, ylsb) not on curve")
|
||||
}
|
||||
|
||||
return P, nil
|
||||
}
|
||||
|
||||
// Double should return (x1+x1, y1+y1); the body below is a stub that always returns zero values.
|
||||
func (ec Curve) Double(x1, y1 *big.Int) (x, y *big.Int) {
|
||||
x = &big.Int{}
|
||||
x.SetBytes([]byte{0x00})
|
||||
y = &big.Int{}
|
||||
y.SetBytes([]byte{0x00})
|
||||
return x, y
|
||||
}
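A brief sketch of the decompression round trip defined above (illustrative only; it reuses the package's own generator point):

package main

import (
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
)

func main() {
	curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
	// Recover G from its x coordinate and the least significant bit of its y coordinate.
	P, err := curve.Decompress(curve.G.X, curve.G.Y.Bit(0))
	if err != nil {
		panic(err)
	}
	fmt.Println(P.Y.Cmp(curve.G.Y) == 0) // expected to print true
}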
231
_pkg.dev/crypto/elliptic/elliptic_test.go
Executable file
@ -0,0 +1,231 @@
/* btckeygenie v1.0.0
|
||||
* https://github.com/vsergeev/btckeygenie
|
||||
* License: MIT
|
||||
*/
|
||||
|
||||
package elliptic
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
curve = NewEllipticCurve(Secp256k1)
|
||||
}
|
||||
|
||||
func hex2int(hexstring string) (v *big.Int) {
|
||||
v, _ = new(big.Int).SetString(hexstring, 16)
|
||||
return v
|
||||
}
|
||||
|
||||
func TestOnCurve(t *testing.T) {
|
||||
if !curve.IsOnCurve(curve.G.X, curve.G.Y) {
|
||||
t.Fatal("failure G on curve")
|
||||
}
|
||||
|
||||
t.Log("G on curve")
|
||||
}
|
||||
|
||||
func TestInfinity(t *testing.T) {
|
||||
O := Point{nil, nil}
|
||||
|
||||
/* O not on curve */
|
||||
if curve.IsOnCurve(O.X, O.Y) {
|
||||
t.Fatal("failure O on curve")
|
||||
}
|
||||
|
||||
/* O is infinity */
|
||||
if !curve.IsInfinity(O) {
|
||||
t.Fatal("failure O not infinity on curve")
|
||||
}
|
||||
|
||||
t.Log("O is not on curve and is infinity")
|
||||
}
|
||||
|
||||
func TestPointAdd(t *testing.T) {
|
||||
X := "50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352"
|
||||
Y := "2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6"
|
||||
|
||||
P := Point{hex2int(X), hex2int(Y)}
|
||||
O := Point{nil, nil}
|
||||
|
||||
/* R = O + O = O */
|
||||
{
|
||||
R1, R2 := curve.Add(O.X, O.Y, O.X, O.Y)
|
||||
R := Point{R1, R2}
|
||||
if !curve.IsInfinity(R) {
|
||||
t.Fatal("failure O + O = O")
|
||||
}
|
||||
t.Log("success O + O = O")
|
||||
}
|
||||
|
||||
/* R = P + O = P */
|
||||
{
|
||||
R1, R2 := curve.Add(P.X, P.Y, O.X, O.Y)
|
||||
R := Point{R1, R2}
|
||||
if R.X.Cmp(P.X) != 0 || R.Y.Cmp(P.Y) != 0 {
|
||||
t.Fatal("failure P + O = P")
|
||||
}
|
||||
t.Log("success P + O = P")
|
||||
}
|
||||
|
||||
/* R = O + Q = Q */
|
||||
{
|
||||
R1, R2 := curve.Add(O.X, O.Y, P.X, P.Y)
|
||||
R := Point{R1, R2}
|
||||
if R.X.Cmp(P.X) != 0 || R.Y.Cmp(P.Y) != 0 {
|
||||
t.Fatal("failure O + Q = Q")
|
||||
}
|
||||
t.Log("success O + Q = Q")
|
||||
}
|
||||
|
||||
/* R = (x,y) + (x,-y) = O */
|
||||
{
|
||||
Q := Point{P.X, subMod(big.NewInt(0), P.Y, curve.P)}
|
||||
|
||||
R1, R2 := curve.Add(P.X, P.Y, Q.X, Q.Y)
|
||||
R := Point{R1, R2}
|
||||
if !curve.IsInfinity(R) {
|
||||
t.Fatal("failure (x,y) + (x,-y) = O")
|
||||
}
|
||||
t.Log("success (x,y) + (x,-y) = O")
|
||||
}
|
||||
|
||||
/* R = P + P */
|
||||
{
|
||||
PP := Point{hex2int("5dbcd5dfea550eb4fd3b5333f533f086bb5267c776e2a1a9d8e84c16a6743d82"), hex2int("8dde3986b6cbe395da64b6e95fb81f8af73f6e0cf1100555005bb4ba2a6a4a07")}
|
||||
|
||||
R1, R2 := curve.Add(P.X, P.Y, P.X, P.Y)
|
||||
R := Point{R1, R2}
|
||||
if R.X.Cmp(PP.X) != 0 || R.Y.Cmp(PP.Y) != 0 {
|
||||
t.Fatal("failure P + P")
|
||||
}
|
||||
t.Log("success P + P")
|
||||
}
|
||||
|
||||
Q := Point{hex2int("a83b8de893467d3a88d959c0eb4032d9ce3bf80f175d4d9e75892a3ebb8ab7e5"), hex2int("370f723328c24b7a97fe34063ba68f253fb08f8645d7c8b9a4ff98e3c29e7f0d")}
|
||||
PQ := Point{hex2int("fe7d540002e4355eb0ec36c217b4735495de7bd8634055ded3683b0e9da70ef1"), hex2int("fc033c1d74cb34e087a3495e505c0fc0e9e3e8297994878d89d882254ce8a9ef")}
|
||||
|
||||
/* R = P + Q */
|
||||
{
|
||||
R1, R2 := curve.Add(P.X, P.Y, Q.X, Q.Y)
|
||||
R := Point{R1, R2}
|
||||
if R.X.Cmp(PQ.X) != 0 || R.Y.Cmp(PQ.Y) != 0 {
|
||||
t.Fatal("failure P + Q")
|
||||
}
|
||||
t.Log("success P + Q")
|
||||
}
|
||||
|
||||
/* R = Q + P */
|
||||
{
|
||||
R1, R2 := curve.Add(Q.X, Q.Y, P.X, P.Y)
|
||||
R := Point{R1, R2}
|
||||
if R.X.Cmp(PQ.X) != 0 || R.Y.Cmp(PQ.Y) != 0 {
|
||||
t.Fatal("failure Q + P")
|
||||
}
|
||||
t.Log("success Q + P")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointScalarMult(t *testing.T) {
|
||||
X := "50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352"
|
||||
Y := "2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6"
|
||||
P := Point{hex2int(X), hex2int(Y)}
|
||||
|
||||
/* Q = k*P */
|
||||
{
|
||||
T := Point{hex2int("87d592bfdd24adb52147fea343db93e10d0585bc66d91e365c359973c0dc7067"), hex2int("a374e206cb7c8cd1074bdf9bf6ddea135f983aaa6475c9ab3bb4c38a0046541b")}
|
||||
input, _ := hex.DecodeString("14eb373700c3836404acd0820d9fa8dfa098d26177ca6e18b1c7f70c6af8fc18")
|
||||
|
||||
Q1, Q2 := curve.ScalarMult(P.X, P.Y, input)
|
||||
Q := Point{Q1, Q2}
|
||||
if Q.X.Cmp(T.X) != 0 || Q.Y.Cmp(T.Y) != 0 {
|
||||
t.Fatal("failure k*P")
|
||||
}
|
||||
t.Log("success k*P")
|
||||
}
|
||||
|
||||
/* Q = n*G = O */
|
||||
{
|
||||
Q1, Q2 := curve.ScalarMult(curve.G.X, curve.G.Y, curve.N.Bytes())
|
||||
Q := Point{Q1, Q2}
|
||||
if !curve.IsInfinity(Q) {
|
||||
t.Fatal("failure n*G = O")
|
||||
}
|
||||
t.Log("success n*G = O")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointScalarBaseMult(t *testing.T) {
|
||||
/* Sample Private Key */
|
||||
D := "18e14a7b6a307f426a94f8114701e7c8e774e7f9a47e2c2035db29a206321725"
|
||||
/* Sample Corresponding Public Key */
|
||||
X := "50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352"
|
||||
Y := "2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6"
|
||||
|
||||
P := Point{hex2int(X), hex2int(Y)}
|
||||
|
||||
/* Q = d*G = P */
|
||||
|
||||
Q1, Q2 := curve.ScalarBaseMult(hex2int(D).Bytes())
|
||||
Q := Point{Q1, Q2}
|
||||
if P.X.Cmp(Q.X) != 0 || P.Y.Cmp(Q.Y) != 0 {
|
||||
t.Fatal("failure Q = d*G")
|
||||
}
|
||||
t.Log("success Q = d*G")
|
||||
|
||||
/* Q on curve */
|
||||
if !curve.IsOnCurve(Q.X, Q.Y) {
|
||||
t.Fatal("failure Q on curve")
|
||||
}
|
||||
t.Log("success Q on curve")
|
||||
|
||||
/* R = 0*G = O */
|
||||
R1, R2 := curve.ScalarBaseMult(big.NewInt(0).Bytes())
|
||||
R := Point{R1, R2}
|
||||
if !curve.IsInfinity(R) {
|
||||
t.Fatal("failure 0*G = O")
|
||||
}
|
||||
t.Log("success 0*G = O")
|
||||
}
|
||||
|
||||
func TestPointDecompress(t *testing.T) {
|
||||
/* Valid points */
|
||||
var validDecompressVectors = []Point{
|
||||
{hex2int("50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352"), hex2int("2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6")},
|
||||
{hex2int("a83b8de893467d3a88d959c0eb4032d9ce3bf80f175d4d9e75892a3ebb8ab7e5"), hex2int("370f723328c24b7a97fe34063ba68f253fb08f8645d7c8b9a4ff98e3c29e7f0d")},
|
||||
{hex2int("f680556678e25084a82fa39e1b1dfd0944f7e69fddaa4e03ce934bd6b291dca0"), hex2int("52c10b721d34447e173721fb0151c68de1106badb089fb661523b8302a9097f5")},
|
||||
{hex2int("241febb8e23cbd77d664a18f66ad6240aaec6ecdc813b088d5b901b2e285131f"), hex2int("513378d9ff94f8d3d6c420bd13981df8cd50fd0fbd0cb5afabb3e66f2750026d")},
|
||||
}
|
||||
|
||||
for i := 0; i < len(validDecompressVectors); i++ {
|
||||
P, err := curve.Decompress(validDecompressVectors[i].X, validDecompressVectors[i].Y.Bit(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failure decompress P, got error %v on index %d", err, i)
|
||||
}
|
||||
if P.X.Cmp(validDecompressVectors[i].X) != 0 || P.Y.Cmp(validDecompressVectors[i].Y) != 0 {
|
||||
t.Fatalf("failure decompress P, got mismatch on index %d", i)
|
||||
}
|
||||
}
|
||||
t.Log("success Decompress() on valid vectors")
|
||||
|
||||
/* Invalid points */
|
||||
var invalidDecompressVectors = []struct {
|
||||
X *big.Int
|
||||
YLsb uint
|
||||
}{
|
||||
{hex2int("c8e337cee51ae9af3c0ef923705a0cb1b76f7e8463b3d3060a1c8d795f9630fd"), 0},
|
||||
{hex2int("c8e337cee51ae9af3c0ef923705a0cb1b76f7e8463b3d3060a1c8d795f9630fd"), 1},
|
||||
}
|
||||
|
||||
for i := 0; i < len(invalidDecompressVectors); i++ {
|
||||
_, err := curve.Decompress(invalidDecompressVectors[i].X, invalidDecompressVectors[i].YLsb)
|
||||
if err == nil {
|
||||
t.Fatalf("failure decompress invalid P, got decompressed point on index %d", i)
|
||||
}
|
||||
}
|
||||
t.Log("success Decompress() on invalid vectors")
|
||||
}
88
_pkg.dev/crypto/hash/hash.go
Executable file
@ -0,0 +1,88 @@
package hash
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
// Sha256 hashes the incoming byte slice
|
||||
// using the sha256 algorithm
|
||||
func Sha256(data []byte) (util.Uint256, error) {
	var hash util.Uint256
	hasher := sha256.New()
	hasher.Reset()
	if _, err := hasher.Write(data); err != nil {
		return hash, err
	}
	hash, err := util.Uint256DecodeBytes(hasher.Sum(nil))
	if err != nil {
		return hash, err
	}
	return hash, nil
}
|
||||
|
||||
// DoubleSha256 performs sha256 twice on the given data
|
||||
func DoubleSha256(data []byte) (util.Uint256, error) {
|
||||
var hash util.Uint256
|
||||
|
||||
h1, err := Sha256(data)
|
||||
if err != nil {
|
||||
return hash, err
|
||||
}
|
||||
|
||||
hash, err = Sha256(h1.Bytes())
|
||||
if err != nil {
|
||||
return hash, err
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
// RipeMD160 performs the RIPEMD160 hash algorithm
|
||||
// on the given data
|
||||
func RipeMD160(data []byte) (util.Uint160, error) {
|
||||
var hash util.Uint160
|
||||
hasher := ripemd160.New()
|
||||
hasher.Reset()
|
||||
_, err := io.WriteString(hasher, string(data))
|
||||
|
||||
hash, err = util.Uint160DecodeBytes(hasher.Sum(nil))
|
||||
if err != nil {
|
||||
return hash, err
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
// Hash160 performs sha256 and then ripemd160
|
||||
// on the given data
|
||||
func Hash160(data []byte) (util.Uint160, error) {
	var hash util.Uint160
	h1, err := Sha256(data)
	if err != nil {
		return hash, err
	}
	h2, err := RipeMD160(h1.Bytes())
	if err != nil {
		return hash, err
	}
	hash, err = util.Uint160DecodeBytes(h2.Bytes())
	if err != nil {
		return hash, err
	}
	return hash, nil
}
|
||||
|
||||
// Checksum returns the checksum for a given piece of data
|
||||
// using sha256 twice as the hash algorithm
|
||||
func Checksum(data []byte) ([]byte, error) {
|
||||
hash, err := Sum(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hash[:4], nil
|
||||
}
|
||||
|
||||
// Sum performs sha256 twice on the given data
|
||||
// XXX(issue): We should remove this and just do doublesha256
|
||||
func Sum(b []byte) (util.Uint256, error) {
|
||||
hash, err := DoubleSha256((b))
|
||||
return hash, err
|
||||
}
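A short usage sketch for this package (the public key hex is the same value used in the test file below):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/hash"
)

func main() {
	pub, _ := hex.DecodeString("02cccafb41b220cab63fd77108d2d1ebcffa32be26da29a04dca4996afce5f75db")

	h160, err := hash.Hash160(pub) // sha256 followed by ripemd160
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h160.Bytes()))

	sum, _ := hash.Checksum(pub) // first 4 bytes of double-sha256
	fmt.Printf("%x\n", sum)
}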
62
_pkg.dev/crypto/hash/hash_test.go
Executable file
@ -0,0 +1,62 @@
package hash
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSha256(t *testing.T) {
|
||||
input := []byte("hello")
|
||||
data, err := Sha256(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
|
||||
actual := hex.EncodeToString(data.Bytes()) // MARK: In the DecodeBytes function, there is a bytes reverse, not sure why?
|
||||
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHashDoubleSha256(t *testing.T) {
|
||||
input := []byte("hello")
|
||||
data, err := DoubleSha256(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
firstSha, _ := Sha256(input)
|
||||
doubleSha, _ := Sha256(firstSha.Bytes())
|
||||
expected := hex.EncodeToString(doubleSha.Bytes())
|
||||
|
||||
actual := hex.EncodeToString(data.Bytes())
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHashRipeMD160(t *testing.T) {
|
||||
input := []byte("hello")
|
||||
data, err := RipeMD160(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := "108f07b8382412612c048d07d13f814118445acd"
|
||||
actual := hex.EncodeToString(data.Bytes())
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHash160(t *testing.T) {
|
||||
input := "02cccafb41b220cab63fd77108d2d1ebcffa32be26da29a04dca4996afce5f75db"
|
||||
publicKeyBytes, _ := hex.DecodeString(input)
|
||||
data, err := Hash160(publicKeyBytes)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := "c8e2b685cc70ec96743b55beb9449782f8f775d8"
|
||||
actual := hex.EncodeToString(data.Bytes())
|
||||
assert.Equal(t, expected, actual)
|
||||
}
125
_pkg.dev/crypto/privatekey/privatekey.go
Executable file
@ -0,0 +1,125 @@
package privatekey
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/publickey"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/base58"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/rfc6979"
|
||||
)
|
||||
|
||||
// PrivateKey represents a NEO private key.
|
||||
type PrivateKey struct {
|
||||
b []byte
|
||||
}
|
||||
|
||||
// NewPrivateKey creates a new private key
// using the Secp256r1 curve.
|
||||
func NewPrivateKey() (*PrivateKey, error) {
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
b := make([]byte, curve.N.BitLen()/8+8)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := new(big.Int).SetBytes(b)
|
||||
d.Mod(d, new(big.Int).Sub(curve.N, big.NewInt(1)))
|
||||
d.Add(d, big.NewInt(1))
|
||||
|
||||
p := &PrivateKey{b: d.Bytes()}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// NewPrivateKeyFromHex creates a new private key from the given hex string.
|
||||
func NewPrivateKeyFromHex(str string) (*PrivateKey, error) {
|
||||
b, err := hex.DecodeString(str)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewPrivateKeyFromBytes(b)
|
||||
}
|
||||
|
||||
// NewPrivateKeyFromBytes returns a NEO PrivateKey from the given byte slice.
|
||||
func NewPrivateKeyFromBytes(b []byte) (*PrivateKey, error) {
|
||||
if len(b) != 32 {
|
||||
return nil, fmt.Errorf(
|
||||
"invalid byte length: expected %d bytes got %d", 32, len(b),
|
||||
)
|
||||
}
|
||||
return &PrivateKey{b}, nil
|
||||
}
|
||||
|
||||
// PublicKey returns the public key corresponding to the private key
// for the curve secp256r1.
|
||||
func (p *PrivateKey) PublicKey() (*publickey.PublicKey, error) {
|
||||
var (
|
||||
c = elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
q = new(big.Int).SetBytes(p.b)
|
||||
)
|
||||
|
||||
p1, p2 := c.ScalarBaseMult(q.Bytes())
|
||||
point := elliptic.Point{
|
||||
X: p1,
|
||||
Y: p2,
|
||||
}
|
||||
if !c.IsOnCurve(p1, p2) {
|
||||
return nil, errors.New("failed to derive public key using elliptic curve")
|
||||
}
|
||||
|
||||
return &publickey.PublicKey{
|
||||
Curve: c,
|
||||
Point: point,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// WIFEncode converts a private key
// to the NEO Wallet Import Format (WIF).
|
||||
func WIFEncode(key []byte) (s string) {
|
||||
if len(key) != 32 {
|
||||
return "invalid private key length"
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
buf.WriteByte(0x80)
|
||||
buf.Write(key)
|
||||
|
||||
buf.WriteByte(0x01)
|
||||
|
||||
checksum, _ := hash.Checksum(buf.Bytes())
|
||||
|
||||
buf.Write(checksum)
|
||||
|
||||
WIF := base58.Encode(buf.Bytes())
|
||||
return WIF
|
||||
}
|
||||
|
||||
// Sign will sign the corresponding data using the private key
|
||||
func (p *PrivateKey) Sign(data []byte) ([]byte, error) {
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
key := p.b
|
||||
digest, _ := hash.Sha256(data)
|
||||
|
||||
r, s, err := rfc6979.SignECDSA(curve, key, digest[:], sha256.New)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
curveOrderByteSize := curve.P.BitLen() / 8
|
||||
rBytes, sBytes := r.Bytes(), s.Bytes()
|
||||
signature := make([]byte, curveOrderByteSize*2)
|
||||
copy(signature[curveOrderByteSize-len(rBytes):], rBytes)
|
||||
copy(signature[curveOrderByteSize*2-len(sBytes):], sBytes)
|
||||
|
||||
return signature, nil
|
||||
}
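A minimal end-to-end sketch of this API (key generation, address derivation and signing; it only calls functions defined in this change):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/privatekey"
)

func main() {
	priv, err := privatekey.NewPrivateKey()
	if err != nil {
		panic(err)
	}

	pub, err := priv.PublicKey()
	if err != nil {
		panic(err)
	}
	fmt.Println("address:", pub.ToAddress())

	sig, err := priv.Sign([]byte("sample"))
	if err != nil {
		panic(err)
	}
	fmt.Println("signature:", hex.EncodeToString(sig))
}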
48
_pkg.dev/crypto/privatekey/privatekey_test.go
Executable file
@ -0,0 +1,48 @@
package privatekey
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPrivateKeyToPublicKey(t *testing.T) {
|
||||
input := "495d528227c7dcc234c690af1222e67cde916dac1652cad97e0263825a8268a6"
|
||||
|
||||
privateKey, err := NewPrivateKeyFromHex(input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pubKey, _ := privateKey.PublicKey()
|
||||
pubKeyBytes := pubKey.Bytes()
|
||||
actual := hex.EncodeToString(pubKeyBytes)
|
||||
expected := "03cd4c4ee9c8e1fae9d12ecf7c96cb3a057b550393f9e82182c4dae1139871682e"
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
func TestWIFEncode(t *testing.T) {
|
||||
input := "29bbf53185a973d2e3803cb92908fd08117486d1f2e7bab73ed0d00255511637"
|
||||
inputBytes, _ := hex.DecodeString(input)
|
||||
|
||||
actual := WIFEncode(inputBytes)
|
||||
expected := "KxcqV28rGDcpVR3fYg7R9vricLpyZ8oZhopyFLAWuRv7Y8TE9WhW"
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestSigning(t *testing.T) {
|
||||
// These were taken from the rfcPage:https://tools.ietf.org/html/rfc6979#page-33
|
||||
// public key: U = xG
|
||||
//Ux = 60FED4BA255A9D31C961EB74C6356D68C049B8923B61FA6CE669622E60F29FB6
|
||||
//Uy = 7903FE1008B8BC99A41AE9E95628BC64F2F1B20C2D7E9F5177A3C294D4462299
|
||||
PrivateKey, _ := NewPrivateKeyFromHex("C9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721")
|
||||
|
||||
data, err := PrivateKey.Sign([]byte("sample"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := "EFD48B2AACB6A8FD1140DD9CD45E81D69D2C877B56AAF991C34D0EA84EAF3716"
|
||||
s := "F7CB1C942D657C41D436C7A1B6E29F65F3E900DBB9AFF4064DC4AB2F843ACDA8"
|
||||
assert.Equal(t, strings.ToLower(r+s), hex.EncodeToString(data))
|
||||
}
33
_pkg.dev/crypto/publickey/TestHelper/helper.go
Executable file
@ -0,0 +1,33 @@
package pubkeytesthelper
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/privatekey"
|
||||
)
|
||||
|
||||
// SignDataWithRandomPrivateKey will sign data with
|
||||
// a random private key, then verify said data
|
||||
// returning true if Verify returns true
|
||||
func SignDataWithRandomPrivateKey(data []byte) (bool, error) {
|
||||
|
||||
hashedData, err := hash.Sha256(data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
privKey, err := privatekey.NewPrivateKey()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
signedData, err := privKey.Sign(data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pubKey, err := privKey.PublicKey()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
result := pubKey.Verify(signedData, hashedData.Bytes())
|
||||
|
||||
return result, nil
|
||||
}
34
_pkg.dev/crypto/publickey/TestHelper/helper_test.go
Executable file
@ -0,0 +1,34 @@
package pubkeytesthelper
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/privatekey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPubKeyVerify(t *testing.T) {
|
||||
actual, err := SignDataWithRandomPrivateKey([]byte("sample"))
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := true
|
||||
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestWrongPubKey(t *testing.T) {
|
||||
privKey, _ := privatekey.NewPrivateKey()
|
||||
sample := []byte("sample")
|
||||
hashedData, _ := hash.Sha256(sample)
|
||||
signedData, _ := privKey.Sign(sample)
|
||||
|
||||
secondPrivKey, _ := privatekey.NewPrivateKey()
|
||||
wrongPubKey, _ := secondPrivKey.PublicKey()
|
||||
|
||||
actual := wrongPubKey.Verify(signedData, hashedData.Bytes())
|
||||
expected := false
assert.Equal(t, expected, actual)
|
||||
}
164
_pkg.dev/crypto/publickey/publickey.go
Executable file
@ -0,0 +1,164 @@
package publickey
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/base58"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
)
|
||||
|
||||
// PublicKeys is a list of public keys.
|
||||
type PublicKeys []*PublicKey
|
||||
|
||||
func (keys PublicKeys) Len() int { return len(keys) }
|
||||
func (keys PublicKeys) Swap(i, j int) { keys[i], keys[j] = keys[j], keys[i] }
|
||||
func (keys PublicKeys) Less(i, j int) bool {
|
||||
|
||||
if keys[i].X.Cmp(keys[j].X) == -1 {
|
||||
return true
|
||||
}
|
||||
if keys[i].X.Cmp(keys[j].X) == 1 {
|
||||
return false
|
||||
}
|
||||
if keys[i].X.Cmp(keys[j].X) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return keys[i].Y.Cmp(keys[j].Y) == -1
|
||||
}
|
||||
|
||||
// PublicKey represents a public key and provides a high level
|
||||
// API around the ECPoint.
|
||||
type PublicKey struct {
|
||||
Curve elliptic.Curve
|
||||
elliptic.Point
|
||||
}
|
||||
|
||||
// NewPublicKeyFromString returns a public key created from the
|
||||
// given hex string.
|
||||
func NewPublicKeyFromString(s string) (*PublicKey, error) {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
|
||||
pubKey := &PublicKey{curve, elliptic.Point{}}
|
||||
|
||||
if err := pubKey.DecodeBinary(bytes.NewReader(b)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pubKey, nil
|
||||
}
|
||||
|
||||
// Bytes returns the byte array representation of the public key.
|
||||
func (p *PublicKey) Bytes() []byte {
|
||||
if p.Curve.IsInfinity(p.Point) {
|
||||
return []byte{0x00}
|
||||
}
|
||||
|
||||
var (
|
||||
x = p.X.Bytes()
|
||||
paddedX = append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
|
||||
prefix = byte(0x03)
|
||||
)
|
||||
|
||||
if p.Y.Bit(0) == 0 {
|
||||
prefix = byte(0x02)
|
||||
}
|
||||
|
||||
return append([]byte{prefix}, paddedX...)
|
||||
}
|
||||
|
||||
// ToAddress converts a public key to its NEO address.
|
||||
func (p *PublicKey) ToAddress() string {
|
||||
|
||||
publicKeyBytes := p.Bytes()
|
||||
|
||||
publicKeyBytes = append([]byte{0x21}, publicKeyBytes...) // 0x21 = length of pubKey
|
||||
publicKeyBytes = append(publicKeyBytes, 0xAC) // 0xAC = CheckSig
|
||||
|
||||
hash160PubKey, _ := hash.Hash160(publicKeyBytes)
|
||||
|
||||
versionHash160PubKey := append([]byte{0x17}, hash160PubKey.Bytes()...)
|
||||
|
||||
checksum, _ := hash.Checksum(versionHash160PubKey)
|
||||
|
||||
checkVersionHash160 := append(versionHash160PubKey, checksum...)
|
||||
|
||||
address := base58.Encode(checkVersionHash160)
|
||||
|
||||
return address
|
||||
}
|
||||
|
||||
// DecodeBinary decodes a PublicKey from the given io.Reader.
|
||||
func (p *PublicKey) DecodeBinary(r io.Reader) error {
|
||||
|
||||
var prefix uint8
|
||||
if err := binary.Read(r, binary.LittleEndian, &prefix); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Infinity
|
||||
if prefix == 0x00 {
|
||||
p.Point = elliptic.Point{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compressed public keys.
|
||||
if prefix == 0x02 || prefix == 0x03 {
|
||||
|
||||
b := make([]byte, 32)
|
||||
if err := binary.Read(r, binary.LittleEndian, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
p.Point, err = p.Curve.Decompress(new(big.Int).SetBytes(b), uint(prefix&0x1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else if prefix == 0x04 {
|
||||
buf := make([]byte, 65)
|
||||
if err := binary.Read(r, binary.LittleEndian, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
p.X = new(big.Int).SetBytes(buf[1:33])
|
||||
p.Y = new(big.Int).SetBytes(buf[33:65])
|
||||
} else {
|
||||
return fmt.Errorf("invalid prefix %d", prefix)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeBinary encodes a PublicKey to the given io.Writer.
|
||||
func (p *PublicKey) EncodeBinary(w io.Writer) error {
|
||||
return binary.Write(w, binary.LittleEndian, p.Bytes())
|
||||
}
|
||||
|
||||
// Verify returns true if the signature is valid and corresponds
|
||||
// to the hash and public key
|
||||
func (p *PublicKey) Verify(signature []byte, hash []byte) bool {
|
||||
|
||||
publicKey := &ecdsa.PublicKey{}
|
||||
publicKey.Curve = p.Curve
|
||||
publicKey.X = p.X
|
||||
publicKey.Y = p.Y
|
||||
if p.X == nil || p.Y == nil {
|
||||
return false
|
||||
}
|
||||
rBytes := new(big.Int).SetBytes(signature[0:32])
|
||||
sBytes := new(big.Int).SetBytes(signature[32:64])
|
||||
return ecdsa.Verify(publicKey, hash, rBytes, sBytes)
|
||||
}
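A small sign-and-verify sketch (the same flow as the TestHelper package earlier in this change; it only uses packages introduced here):

package main

import (
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/hash"
	"github.com/CityOfZion/neo-go/pkg/crypto/privatekey"
)

func main() {
	priv, _ := privatekey.NewPrivateKey()
	pub, _ := priv.PublicKey()

	msg := []byte("sample")
	digest, _ := hash.Sha256(msg) // Sign hashes the message the same way internally
	sig, _ := priv.Sign(msg)

	fmt.Println(pub.Verify(sig, digest.Bytes())) // expected to print true
}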
81
_pkg.dev/crypto/publickey/publickey_test.go
Executable file
@ -0,0 +1,81 @@
package publickey
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDecodeFromString(t *testing.T) {
|
||||
str := "03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c"
|
||||
pubKey, err := NewPublicKeyFromString(str)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, str, hex.EncodeToString(pubKey.Bytes()))
|
||||
}
|
||||
|
||||
func TestEncodeDecodeInfinity(t *testing.T) {
|
||||
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
|
||||
key := &PublicKey{curve, elliptic.Point{}}
|
||||
buf := new(bytes.Buffer)
|
||||
assert.Nil(t, key.EncodeBinary(buf))
|
||||
assert.Equal(t, 1, buf.Len())
|
||||
|
||||
keyDecode := &PublicKey{}
|
||||
assert.Nil(t, keyDecode.DecodeBinary(buf))
|
||||
assert.Equal(t, []byte{0x00}, keyDecode.Bytes())
|
||||
}
|
||||
|
||||
func TestEncodeDecodePublicKey(t *testing.T) {
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
p := &PublicKey{curve, randomECPoint()}
|
||||
buf := new(bytes.Buffer)
|
||||
assert.Nil(t, p.EncodeBinary(buf))
|
||||
|
||||
pDecode := &PublicKey{curve, elliptic.Point{}}
|
||||
assert.Nil(t, pDecode.DecodeBinary(buf))
|
||||
assert.Equal(t, p.X, pDecode.X)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubkeyToAddress(t *testing.T) {
|
||||
|
||||
pubKey, err := NewPublicKeyFromString("031ee4e73a17d8f76dc02532e2620bcb12425b33c0c9f9694cc2caa8226b68cad4")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
actual := pubKey.ToAddress()
|
||||
expected := "AUpGsNCHzSimeMRVPQfhwrVdiUp8Q2N2Qx"
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func randomECPoint() elliptic.Point {
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
b := make([]byte, curve.N.BitLen()/8+8)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return elliptic.Point{}
|
||||
}
|
||||
|
||||
d := new(big.Int).SetBytes(b)
|
||||
d.Mod(d, new(big.Int).Sub(curve.N, big.NewInt(1)))
|
||||
d.Add(d, big.NewInt(1))
|
||||
|
||||
q := new(big.Int).SetBytes(d.Bytes())
|
||||
P1, P2 := curve.ScalarBaseMult(q.Bytes())
|
||||
return elliptic.Point{
|
||||
X: P1,
|
||||
Y: P2,
|
||||
}
|
||||
}
21
_pkg.dev/crypto/rfc6979/LICENSE
Executable file
@ -0,0 +1,21 @@
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Coda Hale
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
45
_pkg.dev/crypto/rfc6979/dsa.go
Executable file
@ -0,0 +1,45 @@
package rfc6979
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"hash"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// SignDSA signs an arbitrary length hash (which should be the result of hashing
|
||||
// a larger message) using the private key, priv. It returns the signature as a
|
||||
// pair of integers.
|
||||
//
|
||||
// Note that FIPS 186-3 section 4.6 specifies that the hash should be truncated
|
||||
// to the byte-length of the subgroup. This function does not perform that
|
||||
// truncation itself.
|
||||
func SignDSA(priv *dsa.PrivateKey, hash []byte, alg func() hash.Hash) (r, s *big.Int, err error) {
|
||||
n := priv.Q.BitLen()
|
||||
if n&7 != 0 {
|
||||
err = dsa.ErrInvalidPublicKey
|
||||
return
|
||||
}
|
||||
n >>= 3
|
||||
|
||||
generateSecret(priv.Q, priv.X, alg, hash, func(k *big.Int) bool {
|
||||
inv := new(big.Int).ModInverse(k, priv.Q)
|
||||
r = new(big.Int).Exp(priv.G, k, priv.P)
|
||||
r.Mod(r, priv.Q)
|
||||
|
||||
if r.Sign() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
z := new(big.Int).SetBytes(hash)
|
||||
|
||||
s = new(big.Int).Mul(priv.X, r)
|
||||
s.Add(s, z)
|
||||
s.Mod(s, priv.Q)
|
||||
s.Mul(s, inv)
|
||||
s.Mod(s, priv.Q)
|
||||
|
||||
return s.Sign() != 0
|
||||
})
|
||||
|
||||
return
|
||||
}
270
_pkg.dev/crypto/rfc6979/dsa_test.go
Executable file
@ -0,0 +1,270 @@
package rfc6979_test
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/o3labs/neo-utils/neoutils/rfc6979"
|
||||
)
|
||||
|
||||
type dsaFixture struct {
|
||||
name string
|
||||
key *dsaKey
|
||||
alg func() hash.Hash
|
||||
message string
|
||||
r, s string
|
||||
}
|
||||
|
||||
type dsaKey struct {
|
||||
key *dsa.PrivateKey
|
||||
subgroup int
|
||||
}
|
||||
|
||||
var dsa1024 = &dsaKey{
|
||||
key: &dsa.PrivateKey{
|
||||
PublicKey: dsa.PublicKey{
|
||||
Parameters: dsa.Parameters{
|
||||
P: dsaLoadInt("86F5CA03DCFEB225063FF830A0C769B9DD9D6153AD91D7CE27F787C43278B447E6533B86B18BED6E8A48B784A14C252C5BE0DBF60B86D6385BD2F12FB763ED8873ABFD3F5BA2E0A8C0A59082EAC056935E529DAF7C610467899C77ADEDFC846C881870B7B19B2B58F9BE0521A17002E3BDD6B86685EE90B3D9A1B02B782B1779"),
|
||||
Q: dsaLoadInt("996F967F6C8E388D9E28D01E205FBA957A5698B1"),
|
||||
G: dsaLoadInt("07B0F92546150B62514BB771E2A0C0CE387F03BDA6C56B505209FF25FD3C133D89BBCD97E904E09114D9A7DEFDEADFC9078EA544D2E401AEECC40BB9FBBF78FD87995A10A1C27CB7789B594BA7EFB5C4326A9FE59A070E136DB77175464ADCA417BE5DCE2F40D10A46A3A3943F26AB7FD9C0398FF8C76EE0A56826A8A88F1DBD"),
|
||||
},
|
||||
Y: dsaLoadInt("5DF5E01DED31D0297E274E1691C192FE5868FEF9E19A84776454B100CF16F65392195A38B90523E2542EE61871C0440CB87C322FC4B4D2EC5E1E7EC766E1BE8D4CE935437DC11C3C8FD426338933EBFE739CB3465F4D3668C5E473508253B1E682F65CBDC4FAE93C2EA212390E54905A86E2223170B44EAA7DA5DD9FFCFB7F3B"),
|
||||
},
|
||||
X: dsaLoadInt("411602CB19A6CCC34494D79D98EF1E7ED5AF25F7"),
|
||||
},
|
||||
subgroup: 160,
|
||||
}
|
||||
|
||||
var dsa2048 = &dsaKey{
|
||||
key: &dsa.PrivateKey{
|
||||
PublicKey: dsa.PublicKey{
|
||||
Parameters: dsa.Parameters{
|
||||
P: dsaLoadInt("9DB6FB5951B66BB6FE1E140F1D2CE5502374161FD6538DF1648218642F0B5C48C8F7A41AADFA187324B87674FA1822B00F1ECF8136943D7C55757264E5A1A44FFE012E9936E00C1D3E9310B01C7D179805D3058B2A9F4BB6F9716BFE6117C6B5B3CC4D9BE341104AD4A80AD6C94E005F4B993E14F091EB51743BF33050C38DE235567E1B34C3D6A5C0CEAA1A0F368213C3D19843D0B4B09DCB9FC72D39C8DE41F1BF14D4BB4563CA28371621CAD3324B6A2D392145BEBFAC748805236F5CA2FE92B871CD8F9C36D3292B5509CA8CAA77A2ADFC7BFD77DDA6F71125A7456FEA153E433256A2261C6A06ED3693797E7995FAD5AABBCFBE3EDA2741E375404AE25B"),
|
||||
Q: dsaLoadInt("F2C3119374CE76C9356990B465374A17F23F9ED35089BD969F61C6DDE9998C1F"),
|
||||
G: dsaLoadInt("5C7FF6B06F8F143FE8288433493E4769C4D988ACE5BE25A0E24809670716C613D7B0CEE6932F8FAA7C44D2CB24523DA53FBE4F6EC3595892D1AA58C4328A06C46A15662E7EAA703A1DECF8BBB2D05DBE2EB956C142A338661D10461C0D135472085057F3494309FFA73C611F78B32ADBB5740C361C9F35BE90997DB2014E2EF5AA61782F52ABEB8BD6432C4DD097BC5423B285DAFB60DC364E8161F4A2A35ACA3A10B1C4D203CC76A470A33AFDCBDD92959859ABD8B56E1725252D78EAC66E71BA9AE3F1DD2487199874393CD4D832186800654760E1E34C09E4D155179F9EC0DC4473F996BDCE6EED1CABED8B6F116F7AD9CF505DF0F998E34AB27514B0FFE7"),
|
||||
},
|
||||
Y: dsaLoadInt("667098C654426C78D7F8201EAC6C203EF030D43605032C2F1FA937E5237DBD949F34A0A2564FE126DC8B715C5141802CE0979C8246463C40E6B6BDAA2513FA611728716C2E4FD53BC95B89E69949D96512E873B9C8F8DFD499CC312882561ADECB31F658E934C0C197F2C4D96B05CBAD67381E7B768891E4DA3843D24D94CDFB5126E9B8BF21E8358EE0E0A30EF13FD6A664C0DCE3731F7FB49A4845A4FD8254687972A2D382599C9BAC4E0ED7998193078913032558134976410B89D2C171D123AC35FD977219597AA7D15C1A9A428E59194F75C721EBCBCFAE44696A499AFA74E04299F132026601638CB87AB79190D4A0986315DA8EEC6561C938996BEADF"),
|
||||
},
|
||||
X: dsaLoadInt("69C7548C21D0DFEA6B9A51C9EAD4E27C33D3B3F180316E5BCAB92C933F0E4DBC"),
|
||||
},
|
||||
subgroup: 256,
|
||||
}
|
||||
|
||||
var dsaFixtures = []dsaFixture{
|
||||
// DSA, 1024 Bits
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.1
|
||||
dsaFixture{
|
||||
name: "1024/SHA-1 #1",
|
||||
key: dsa1024,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "2E1A0C2562B2912CAAF89186FB0F42001585DA55",
|
||||
s: "29EFB6B0AFF2D7A68EB70CA313022253B9A88DF5",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-224 #1",
|
||||
key: dsa1024,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "4BC3B686AEA70145856814A6F1BB53346F02101E",
|
||||
s: "410697B92295D994D21EDD2F4ADA85566F6F94C1",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-256 #1",
|
||||
key: dsa1024,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "81F2F5850BE5BC123C43F71A3033E9384611C545",
|
||||
s: "4CDD914B65EB6C66A8AAAD27299BEE6B035F5E89",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-384 #1",
|
||||
key: dsa1024,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "07F2108557EE0E3921BC1774F1CA9B410B4CE65A",
|
||||
s: "54DF70456C86FAC10FAB47C1949AB83F2C6F7595",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-512 #1",
|
||||
key: dsa1024,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "16C3491F9B8C3FBBDD5E7A7B667057F0D8EE8E1B",
|
||||
s: "02C36A127A7B89EDBB72E4FFBC71DABC7D4FC69C",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-1 #2",
|
||||
key: dsa1024,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "42AB2052FD43E123F0607F115052A67DCD9C5C77",
|
||||
s: "183916B0230D45B9931491D4C6B0BD2FB4AAF088",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-224 #2",
|
||||
key: dsa1024,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "6868E9964E36C1689F6037F91F28D5F2C30610F2",
|
||||
s: "49CEC3ACDC83018C5BD2674ECAAD35B8CD22940F",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-256 #2",
|
||||
key: dsa1024,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "22518C127299B0F6FDC9872B282B9E70D0790812",
|
||||
s: "6837EC18F150D55DE95B5E29BE7AF5D01E4FE160",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-384 #2",
|
||||
key: dsa1024,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "854CF929B58D73C3CBFDC421E8D5430CD6DB5E66",
|
||||
s: "91D0E0F53E22F898D158380676A871A157CDA622",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "1024/SHA-512 #2",
|
||||
key: dsa1024,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "8EA47E475BA8AC6F2D821DA3BD212D11A3DEB9A0",
|
||||
s: "7C670C7AD72B6C050C109E1790008097125433E8",
|
||||
},
|
||||
|
||||
// DSA, 2048 Bits
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.2
|
||||
dsaFixture{
|
||||
name: "2048/SHA-1 #1",
|
||||
key: dsa2048,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "3A1B2DBD7489D6ED7E608FD036C83AF396E290DBD602408E8677DAABD6E7445A",
|
||||
s: "D26FCBA19FA3E3058FFC02CA1596CDBB6E0D20CB37B06054F7E36DED0CDBBCCF",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-224 #1",
|
||||
key: dsa2048,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "DC9F4DEADA8D8FF588E98FED0AB690FFCE858DC8C79376450EB6B76C24537E2C",
|
||||
s: "A65A9C3BC7BABE286B195D5DA68616DA8D47FA0097F36DD19F517327DC848CEC",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-256 #1",
|
||||
key: dsa2048,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "EACE8BDBBE353C432A795D9EC556C6D021F7A03F42C36E9BC87E4AC7932CC809",
|
||||
s: "7081E175455F9247B812B74583E9E94F9EA79BD640DC962533B0680793A38D53",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-384 #1",
|
||||
key: dsa2048,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "B2DA945E91858834FD9BF616EBAC151EDBC4B45D27D0DD4A7F6A22739F45C00B",
|
||||
s: "19048B63D9FD6BCA1D9BAE3664E1BCB97F7276C306130969F63F38FA8319021B",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-512 #1",
|
||||
key: dsa2048,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "2016ED092DC5FB669B8EFB3D1F31A91EECB199879BE0CF78F02BA062CB4C942E",
|
||||
s: "D0C76F84B5F091E141572A639A4FB8C230807EEA7D55C8A154A224400AFF2351",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-1 #2",
|
||||
key: dsa2048,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "C18270A93CFC6063F57A4DFA86024F700D980E4CF4E2CB65A504397273D98EA0",
|
||||
s: "414F22E5F31A8B6D33295C7539C1C1BA3A6160D7D68D50AC0D3A5BEAC2884FAA",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-224 #2",
|
||||
key: dsa2048,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "272ABA31572F6CC55E30BF616B7A265312018DD325BE031BE0CC82AA17870EA3",
|
||||
s: "E9CC286A52CCE201586722D36D1E917EB96A4EBDB47932F9576AC645B3A60806",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-256 #2",
|
||||
key: dsa2048,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "8190012A1969F9957D56FCCAAD223186F423398D58EF5B3CEFD5A4146A4476F0",
|
||||
s: "7452A53F7075D417B4B013B278D1BB8BBD21863F5E7B1CEE679CF2188E1AB19E",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-384 #2",
|
||||
key: dsa2048,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "239E66DDBE8F8C230A3D071D601B6FFBDFB5901F94D444C6AF56F732BEB954BE",
|
||||
s: "6BD737513D5E72FE85D1C750E0F73921FE299B945AAD1C802F15C26A43D34961",
|
||||
},
|
||||
dsaFixture{
|
||||
name: "2048/SHA-512 #2",
|
||||
key: dsa2048,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "89EC4BB1400ECCFF8E7D9AA515CD1DE7803F2DAFF09693EE7FD1353E90A68307",
|
||||
s: "C9F0BDABCC0D880BB137A994CC7F3980CE91CC10FAF529FC46565B15CEA854E1",
|
||||
},
|
||||
}
|
||||
|
||||
func TestDSASignatures(t *testing.T) {
|
||||
for _, f := range dsaFixtures {
|
||||
testDsaFixture(&f, t)
|
||||
}
|
||||
}
|
||||
|
||||
func testDsaFixture(f *dsaFixture, t *testing.T) {
|
||||
t.Logf("Testing %s", f.name)
|
||||
|
||||
h := f.alg()
|
||||
h.Write([]byte(f.message))
|
||||
digest := h.Sum(nil)
|
||||
|
||||
g := f.key.subgroup / 8
|
||||
if len(digest) > g {
|
||||
digest = digest[0:g]
|
||||
}
|
||||
|
||||
r, s, err := rfc6979.SignDSA(f.key.key, digest, f.alg)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
expectedR := dsaLoadInt(f.r)
|
||||
expectedS := dsaLoadInt(f.s)
|
||||
|
||||
if r.Cmp(expectedR) != 0 {
|
||||
t.Errorf("%s: Expected R of %X, got %X", f.name, expectedR, r)
|
||||
}
|
||||
|
||||
if s.Cmp(expectedS) != 0 {
|
||||
t.Errorf("%s: Expected S of %X, got %X", f.name, expectedS, s)
|
||||
}
|
||||
}
|
||||
|
||||
func dsaLoadInt(s string) *big.Int {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return new(big.Int).SetBytes(b)
|
||||
}
59
_pkg.dev/crypto/rfc6979/ecdsa.go
Executable file
@ -0,0 +1,59 @@
package rfc6979
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"math/big"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
|
||||
)
|
||||
|
||||
// SignECDSA signs an arbitrary length hash (which should be the result of
|
||||
// hashing a larger message) using the private key, priv. It returns the
|
||||
// signature as a pair of integers.
|
||||
//
|
||||
// Note that FIPS 186-3 section 4.6 specifies that the hash should be truncated
|
||||
// to the byte-length of the subgroup. This function does not perform that
|
||||
// truncation itself.
|
||||
func SignECDSA(curve elliptic.Curve, priv []byte, hash []byte, alg func() hash.Hash) (r, s *big.Int, err error) {
|
||||
c := curve
|
||||
N := c.N
|
||||
D := new(big.Int)
|
||||
D.SetBytes(priv)
|
||||
generateSecret(N, D, alg, hash, func(k *big.Int) bool {
|
||||
|
||||
inv := new(big.Int).ModInverse(k, N)
|
||||
|
||||
r, _ = curve.ScalarBaseMult(k.Bytes())
|
||||
r.Mod(r, N)
|
||||
|
||||
if r.Sign() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
e := hashToInt(hash, c)
|
||||
s = new(big.Int).Mul(D, r)
|
||||
s.Add(s, e)
|
||||
s.Mul(s, inv)
|
||||
s.Mod(s, N)
|
||||
|
||||
return s.Sign() != 0
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// copied from crypto/ecdsa
|
||||
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
|
||||
orderBits := c.N.BitLen()
|
||||
orderBytes := (orderBits + 7) / 8
|
||||
if len(hash) > orderBytes {
|
||||
hash = hash[:orderBytes]
|
||||
}
|
||||
|
||||
ret := new(big.Int).SetBytes(hash)
|
||||
excess := len(hash)*8 - orderBits
|
||||
if excess > 0 {
|
||||
ret.Rsh(ret, uint(excess))
|
||||
}
|
||||
return ret
|
||||
}
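A sketch of calling SignECDSA directly over the package's own curve type (the private key is the RFC 6979 P-256 test key used elsewhere in this change; the digest is computed with the standard library rather than the hash package, so the printed r/s values are illustrative only):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
	"github.com/CityOfZion/neo-go/pkg/crypto/rfc6979"
)

func main() {
	priv, _ := hex.DecodeString("C9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721")
	digest := sha256.Sum256([]byte("sample"))

	curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
	r, s, err := rfc6979.SignECDSA(curve, priv, digest[:], sha256.New)
	if err != nil {
		panic(err)
	}
	fmt.Printf("r=%X s=%X\n", r, s)
}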
447
_pkg.dev/crypto/rfc6979/ecdsa_test.go
Executable file
@ -0,0 +1,447 @@
package rfc6979_test
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"hash"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/o3labs/neo-utils/neoutils/rfc6979"
|
||||
)
|
||||
|
||||
type ecdsaFixture struct {
|
||||
name string
|
||||
key *ecdsaKey
|
||||
alg func() hash.Hash
|
||||
message string
|
||||
r, s string
|
||||
}
|
||||
|
||||
type ecdsaKey struct {
|
||||
key *ecdsa.PrivateKey
|
||||
subgroup int
|
||||
}
|
||||
|
||||
var p224 = &ecdsaKey{
|
||||
key: &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P224(),
|
||||
X: ecdsaLoadInt("00CF08DA5AD719E42707FA431292DEA11244D64FC51610D94B130D6C"),
|
||||
Y: ecdsaLoadInt("EEAB6F3DEBE455E3DBF85416F7030CBD94F34F2D6F232C69F3C1385A"),
|
||||
},
|
||||
D: ecdsaLoadInt("F220266E1105BFE3083E03EC7A3A654651F45E37167E88600BF257C1"),
|
||||
},
|
||||
subgroup: 224,
|
||||
}
|
||||
|
||||
var p256 = &ecdsaKey{
|
||||
key: &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P256(),
|
||||
X: ecdsaLoadInt("60FED4BA255A9D31C961EB74C6356D68C049B8923B61FA6CE669622E60F29FB6"),
|
||||
Y: ecdsaLoadInt("7903FE1008B8BC99A41AE9E95628BC64F2F1B20C2D7E9F5177A3C294D4462299"),
|
||||
},
|
||||
D: ecdsaLoadInt("C9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721"),
|
||||
},
|
||||
subgroup: 256,
|
||||
}
|
||||
|
||||
var p384 = &ecdsaKey{
|
||||
key: &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P384(),
|
||||
X: ecdsaLoadInt("EC3A4E415B4E19A4568618029F427FA5DA9A8BC4AE92E02E06AAE5286B300C64DEF8F0EA9055866064A254515480BC13"),
|
||||
Y: ecdsaLoadInt("8015D9B72D7D57244EA8EF9AC0C621896708A59367F9DFB9F54CA84B3F1C9DB1288B231C3AE0D4FE7344FD2533264720"),
|
||||
},
|
||||
D: ecdsaLoadInt("6B9D3DAD2E1B8C1C05B19875B6659F4DE23C3B667BF297BA9AA47740787137D896D5724E4C70A825F872C9EA60D2EDF5"),
|
||||
},
|
||||
subgroup: 384,
|
||||
}
|
||||
|
||||
var p521 = &ecdsaKey{
|
||||
key: &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P521(),
|
||||
X: ecdsaLoadInt("1894550D0785932E00EAA23B694F213F8C3121F86DC97A04E5A7167DB4E5BCD371123D46E45DB6B5D5370A7F20FB633155D38FFA16D2BD761DCAC474B9A2F5023A4"),
|
||||
Y: ecdsaLoadInt("0493101C962CD4D2FDDF782285E64584139C2F91B47F87FF82354D6630F746A28A0DB25741B5B34A828008B22ACC23F924FAAFBD4D33F81EA66956DFEAA2BFDFCF5"),
|
||||
},
|
||||
D: ecdsaLoadInt("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538"),
|
||||
},
|
||||
subgroup: 521,
|
||||
}
|
||||
|
||||
var fixtures = []ecdsaFixture{
|
||||
// ECDSA, 224 Bits (Prime Field)
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.4
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-1 #1",
|
||||
key: p224,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "22226F9D40A96E19C4A301CE5B74B115303C0F3A4FD30FC257FB57AC",
|
||||
s: "66D1CDD83E3AF75605DD6E2FEFF196D30AA7ED7A2EDF7AF475403D69",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-224 #1",
|
||||
key: p224,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "1CDFE6662DDE1E4A1EC4CDEDF6A1F5A2FB7FBD9145C12113E6ABFD3E",
|
||||
s: "A6694FD7718A21053F225D3F46197CA699D45006C06F871808F43EBC",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-256 #1",
|
||||
key: p224,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "61AA3DA010E8E8406C656BC477A7A7189895E7E840CDFE8FF42307BA",
|
||||
s: "BC814050DAB5D23770879494F9E0A680DC1AF7161991BDE692B10101",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-384 #1",
|
||||
key: p224,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "0B115E5E36F0F9EC81F1325A5952878D745E19D7BB3EABFABA77E953",
|
||||
s: "830F34CCDFE826CCFDC81EB4129772E20E122348A2BBD889A1B1AF1D",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-512 #1",
|
||||
key: p224,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "074BD1D979D5F32BF958DDC61E4FB4872ADCAFEB2256497CDAC30397",
|
||||
s: "A4CECA196C3D5A1FF31027B33185DC8EE43F288B21AB342E5D8EB084",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-1 #2",
|
||||
key: p224,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "DEAA646EC2AF2EA8AD53ED66B2E2DDAA49A12EFD8356561451F3E21C",
|
||||
s: "95987796F6CF2062AB8135271DE56AE55366C045F6D9593F53787BD2",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-224 #2",
|
||||
key: p224,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "C441CE8E261DED634E4CF84910E4C5D1D22C5CF3B732BB204DBEF019",
|
||||
s: "902F42847A63BDC5F6046ADA114953120F99442D76510150F372A3F4",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-256 #2",
|
||||
key: p224,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "AD04DDE87B84747A243A631EA47A1BA6D1FAA059149AD2440DE6FBA6",
|
||||
s: "178D49B1AE90E3D8B629BE3DB5683915F4E8C99FDF6E666CF37ADCFD",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-384 #2",
|
||||
key: p224,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "389B92682E399B26518A95506B52C03BC9379A9DADF3391A21FB0EA4",
|
||||
s: "414A718ED3249FF6DBC5B50C27F71F01F070944DA22AB1F78F559AAB",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P224/SHA-512 #2",
|
||||
key: p224,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "049F050477C5ADD858CAC56208394B5A55BAEBBE887FDF765047C17C",
|
||||
s: "077EB13E7005929CEFA3CD0403C7CDCC077ADF4E44F3C41B2F60ECFF",
|
||||
},
|
||||
// ECDSA, 256 Bits (Prime Field)
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.5
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-1 #1",
|
||||
key: p256,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "61340C88C3AAEBEB4F6D667F672CA9759A6CCAA9FA8811313039EE4A35471D32",
|
||||
s: "6D7F147DAC089441BB2E2FE8F7A3FA264B9C475098FDCF6E00D7C996E1B8B7EB",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-224 #1",
|
||||
key: p256,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "53B2FFF5D1752B2C689DF257C04C40A587FABABB3F6FC2702F1343AF7CA9AA3F",
|
||||
s: "B9AFB64FDC03DC1A131C7D2386D11E349F070AA432A4ACC918BEA988BF75C74C",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-256 #1",
|
||||
key: p256,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "EFD48B2AACB6A8FD1140DD9CD45E81D69D2C877B56AAF991C34D0EA84EAF3716",
|
||||
s: "F7CB1C942D657C41D436C7A1B6E29F65F3E900DBB9AFF4064DC4AB2F843ACDA8",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-384 #1",
|
||||
key: p256,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "0EAFEA039B20E9B42309FB1D89E213057CBF973DC0CFC8F129EDDDC800EF7719",
|
||||
s: "4861F0491E6998B9455193E34E7B0D284DDD7149A74B95B9261F13ABDE940954",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-512 #1",
|
||||
key: p256,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "8496A60B5E9B47C825488827E0495B0E3FA109EC4568FD3F8D1097678EB97F00",
|
||||
s: "2362AB1ADBE2B8ADF9CB9EDAB740EA6049C028114F2460F96554F61FAE3302FE",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-1 #2",
|
||||
key: p256,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "0CBCC86FD6ABD1D99E703E1EC50069EE5C0B4BA4B9AC60E409E8EC5910D81A89",
|
||||
s: "01B9D7B73DFAA60D5651EC4591A0136F87653E0FD780C3B1BC872FFDEAE479B1",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-224 #2",
|
||||
key: p256,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "C37EDB6F0AE79D47C3C27E962FA269BB4F441770357E114EE511F662EC34A692",
|
||||
s: "C820053A05791E521FCAAD6042D40AEA1D6B1A540138558F47D0719800E18F2D",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-256 #2",
|
||||
key: p256,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "F1ABB023518351CD71D881567B1EA663ED3EFCF6C5132B354F28D3B0B7D38367",
|
||||
s: "019F4113742A2B14BD25926B49C649155F267E60D3814B4C0CC84250E46F0083",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-384 #2",
|
||||
key: p256,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "83910E8B48BB0C74244EBDF7F07A1C5413D61472BD941EF3920E623FBCCEBEB6",
|
||||
s: "8DDBEC54CF8CD5874883841D712142A56A8D0F218F5003CB0296B6B509619F2C",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P256/SHA-512 #2",
|
||||
key: p256,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "461D93F31B6540894788FD206C07CFA0CC35F46FA3C91816FFF1040AD1581A04",
|
||||
s: "39AF9F15DE0DB8D97E72719C74820D304CE5226E32DEDAE67519E840D1194E55",
|
||||
},
|
||||
// ECDSA, 384 Bits (Prime Field)
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.6
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-1 #1",
|
||||
key: p384,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "EC748D839243D6FBEF4FC5C4859A7DFFD7F3ABDDF72014540C16D73309834FA37B9BA002899F6FDA3A4A9386790D4EB2",
|
||||
s: "A3BCFA947BEEF4732BF247AC17F71676CB31A847B9FF0CBC9C9ED4C1A5B3FACF26F49CA031D4857570CCB5CA4424A443",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-224 #1",
|
||||
key: p384,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "42356E76B55A6D9B4631C865445DBE54E056D3B3431766D0509244793C3F9366450F76EE3DE43F5A125333A6BE060122",
|
||||
s: "9DA0C81787064021E78DF658F2FBB0B042BF304665DB721F077A4298B095E4834C082C03D83028EFBF93A3C23940CA8D",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-256 #1",
|
||||
key: p384,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "21B13D1E013C7FA1392D03C5F99AF8B30C570C6F98D4EA8E354B63A21D3DAA33BDE1E888E63355D92FA2B3C36D8FB2CD",
|
||||
s: "F3AA443FB107745BF4BD77CB3891674632068A10CA67E3D45DB2266FA7D1FEEBEFDC63ECCD1AC42EC0CB8668A4FA0AB0",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-384 #1",
|
||||
key: p384,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "94EDBB92A5ECB8AAD4736E56C691916B3F88140666CE9FA73D64C4EA95AD133C81A648152E44ACF96E36DD1E80FABE46",
|
||||
s: "99EF4AEB15F178CEA1FE40DB2603138F130E740A19624526203B6351D0A3A94FA329C145786E679E7B82C71A38628AC8",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-512 #1",
|
||||
key: p384,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "ED0959D5880AB2D869AE7F6C2915C6D60F96507F9CB3E047C0046861DA4A799CFE30F35CC900056D7C99CD7882433709",
|
||||
s: "512C8CCEEE3890A84058CE1E22DBC2198F42323CE8ACA9135329F03C068E5112DC7CC3EF3446DEFCEB01A45C2667FDD5",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-1 #2",
|
||||
key: p384,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "4BC35D3A50EF4E30576F58CD96CE6BF638025EE624004A1F7789A8B8E43D0678ACD9D29876DAF46638645F7F404B11C7",
|
||||
s: "D5A6326C494ED3FF614703878961C0FDE7B2C278F9A65FD8C4B7186201A2991695BA1C84541327E966FA7B50F7382282",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-224 #2",
|
||||
key: p384,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "E8C9D0B6EA72A0E7837FEA1D14A1A9557F29FAA45D3E7EE888FC5BF954B5E62464A9A817C47FF78B8C11066B24080E72",
|
||||
s: "07041D4A7A0379AC7232FF72E6F77B6DDB8F09B16CCE0EC3286B2BD43FA8C6141C53EA5ABEF0D8231077A04540A96B66",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-256 #2",
|
||||
key: p384,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "6D6DEFAC9AB64DABAFE36C6BF510352A4CC27001263638E5B16D9BB51D451559F918EEDAF2293BE5B475CC8F0188636B",
|
||||
s: "2D46F3BECBCC523D5F1A1256BF0C9B024D879BA9E838144C8BA6BAEB4B53B47D51AB373F9845C0514EEFB14024787265",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-384 #2",
|
||||
key: p384,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "8203B63D3C853E8D77227FB377BCF7B7B772E97892A80F36AB775D509D7A5FEB0542A7F0812998DA8F1DD3CA3CF023DB",
|
||||
s: "DDD0760448D42D8A43AF45AF836FCE4DE8BE06B485E9B61B827C2F13173923E06A739F040649A667BF3B828246BAA5A5",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P384/SHA-512 #2",
|
||||
key: p384,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "A0D5D090C9980FAF3C2CE57B7AE951D31977DD11C775D314AF55F76C676447D06FB6495CD21B4B6E340FC236584FB277",
|
||||
s: "976984E59B4C77B0E8E4460DCA3D9F20E07B9BB1F63BEEFAF576F6B2E8B224634A2092CD3792E0159AD9CEE37659C736",
|
||||
},
|
||||
// ECDSA, 521 Bits (Prime Field)
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.2.7
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-1 #1",
|
||||
key: p521,
|
||||
alg: sha1.New,
|
||||
message: "sample",
|
||||
r: "0343B6EC45728975EA5CBA6659BBB6062A5FF89EEA58BE3C80B619F322C87910FE092F7D45BB0F8EEE01ED3F20BABEC079D202AE677B243AB40B5431D497C55D75D",
|
||||
s: "0E7B0E675A9B24413D448B8CC119D2BF7B2D2DF032741C096634D6D65D0DBE3D5694625FB9E8104D3B842C1B0E2D0B98BEA19341E8676AEF66AE4EBA3D5475D5D16",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-224 #1",
|
||||
key: p521,
|
||||
alg: sha256.New224,
|
||||
message: "sample",
|
||||
r: "1776331CFCDF927D666E032E00CF776187BC9FDD8E69D0DABB4109FFE1B5E2A30715F4CC923A4A5E94D2503E9ACFED92857B7F31D7152E0F8C00C15FF3D87E2ED2E",
|
||||
s: "050CB5265417FE2320BBB5A122B8E1A32BD699089851128E360E620A30C7E17BA41A666AF126CE100E5799B153B60528D5300D08489CA9178FB610A2006C254B41F",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-256 #1",
|
||||
key: p521,
|
||||
alg: sha256.New,
|
||||
message: "sample",
|
||||
r: "1511BB4D675114FE266FC4372B87682BAECC01D3CC62CF2303C92B3526012659D16876E25C7C1E57648F23B73564D67F61C6F14D527D54972810421E7D87589E1A7",
|
||||
s: "04A171143A83163D6DF460AAF61522695F207A58B95C0644D87E52AA1A347916E4F7A72930B1BC06DBE22CE3F58264AFD23704CBB63B29B931F7DE6C9D949A7ECFC",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-384 #1",
|
||||
key: p521,
|
||||
alg: sha512.New384,
|
||||
message: "sample",
|
||||
r: "1EA842A0E17D2DE4F92C15315C63DDF72685C18195C2BB95E572B9C5136CA4B4B576AD712A52BE9730627D16054BA40CC0B8D3FF035B12AE75168397F5D50C67451",
|
||||
s: "1F21A3CEE066E1961025FB048BD5FE2B7924D0CD797BABE0A83B66F1E35EEAF5FDE143FA85DC394A7DEE766523393784484BDF3E00114A1C857CDE1AA203DB65D61",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-512 #1",
|
||||
key: p521,
|
||||
alg: sha512.New,
|
||||
message: "sample",
|
||||
r: "0C328FAFCBD79DD77850370C46325D987CB525569FB63C5D3BC53950E6D4C5F174E25A1EE9017B5D450606ADD152B534931D7D4E8455CC91F9B15BF05EC36E377FA",
|
||||
s: "0617CCE7CF5064806C467F678D3B4080D6F1CC50AF26CA209417308281B68AF282623EAA63E5B5C0723D8B8C37FF0777B1A20F8CCB1DCCC43997F1EE0E44DA4A67A",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-1 #2",
|
||||
key: p521,
|
||||
alg: sha1.New,
|
||||
message: "test",
|
||||
r: "13BAD9F29ABE20DE37EBEB823C252CA0F63361284015A3BF430A46AAA80B87B0693F0694BD88AFE4E661FC33B094CD3B7963BED5A727ED8BD6A3A202ABE009D0367",
|
||||
s: "1E9BB81FF7944CA409AD138DBBEE228E1AFCC0C890FC78EC8604639CB0DBDC90F717A99EAD9D272855D00162EE9527567DD6A92CBD629805C0445282BBC916797FF",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-224 #2",
|
||||
key: p521,
|
||||
alg: sha256.New224,
|
||||
message: "test",
|
||||
r: "1C7ED902E123E6815546065A2C4AF977B22AA8EADDB68B2C1110E7EA44D42086BFE4A34B67DDC0E17E96536E358219B23A706C6A6E16BA77B65E1C595D43CAE17FB",
|
||||
s: "177336676304FCB343CE028B38E7B4FBA76C1C1B277DA18CAD2A8478B2A9A9F5BEC0F3BA04F35DB3E4263569EC6AADE8C92746E4C82F8299AE1B8F1739F8FD519A4",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-256 #2",
|
||||
key: p521,
|
||||
alg: sha256.New,
|
||||
message: "test",
|
||||
r: "00E871C4A14F993C6C7369501900C4BC1E9C7B0B4BA44E04868B30B41D8071042EB28C4C250411D0CE08CD197E4188EA4876F279F90B3D8D74A3C76E6F1E4656AA8",
|
||||
s: "0CD52DBAA33B063C3A6CD8058A1FB0A46A4754B034FCC644766CA14DA8CA5CA9FDE00E88C1AD60CCBA759025299079D7A427EC3CC5B619BFBC828E7769BCD694E86",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-384 #2",
|
||||
key: p521,
|
||||
alg: sha512.New384,
|
||||
message: "test",
|
||||
r: "14BEE21A18B6D8B3C93FAB08D43E739707953244FDBE924FA926D76669E7AC8C89DF62ED8975C2D8397A65A49DCC09F6B0AC62272741924D479354D74FF6075578C",
|
||||
s: "133330865C067A0EAF72362A65E2D7BC4E461E8C8995C3B6226A21BD1AA78F0ED94FE536A0DCA35534F0CD1510C41525D163FE9D74D134881E35141ED5E8E95B979",
|
||||
},
|
||||
ecdsaFixture{
|
||||
name: "P521/SHA-512 #2",
|
||||
key: p521,
|
||||
alg: sha512.New,
|
||||
message: "test",
|
||||
r: "13E99020ABF5CEE7525D16B69B229652AB6BDF2AFFCAEF38773B4B7D08725F10CDB93482FDCC54EDCEE91ECA4166B2A7C6265EF0CE2BD7051B7CEF945BABD47EE6D",
|
||||
s: "1FBD0013C674AA79CB39849527916CE301C66EA7CE8B80682786AD60F98F7E78A19CA69EFF5C57400E3B3A0AD66CE0978214D13BAF4E9AC60752F7B155E2DE4DCE3",
|
||||
},
|
||||
}
|
||||
|
||||
func TestECDSA(t *testing.T) {
|
||||
for _, f := range fixtures {
|
||||
testEcsaFixture(&f, t)
|
||||
}
|
||||
}
|
||||
|
||||
func ecdsaLoadInt(s string) (n *big.Int) {
|
||||
n, _ = new(big.Int).SetString(s, 16)
|
||||
return
|
||||
}
|
||||
|
||||
func testEcsaFixture(f *ecdsaFixture, t *testing.T) {
|
||||
t.Logf("Testing %s", f.name)
|
||||
|
||||
h := f.alg()
|
||||
h.Write([]byte(f.message))
|
||||
digest := h.Sum(nil)
|
||||
|
||||
g := f.key.subgroup / 8
|
||||
if len(digest) > g {
|
||||
digest = digest[0:g]
|
||||
}
|
||||
|
||||
r, s, err := rfc6979.SignECDSA(f.key.key, digest, f.alg)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
expectedR := ecdsaLoadInt(f.r)
|
||||
expectedS := ecdsaLoadInt(f.s)
|
||||
|
||||
if r.Cmp(expectedR) != 0 {
|
||||
t.Errorf("%s: Expected R of %X, got %X", f.name, expectedR, r)
|
||||
}
|
||||
|
||||
if s.Cmp(expectedS) != 0 {
|
||||
t.Errorf("%s: Expected S of %X, got %X", f.name, expectedS, s)
|
||||
}
|
||||
}
|
76
_pkg.dev/crypto/rfc6979/example_test.go
Executable file
|
@ -0,0 +1,76 @@
|
|||
package rfc6979
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"crypto/sha512"
|
||||
"fmt"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/elliptic"
|
||||
)
|
||||
|
||||
// Generates a P-256 (Secp256r1) ECDSA key, uses SHA-512 to sign a message, then verifies
|
||||
// it.
|
||||
func ExampleSignECDSA() {
|
||||
// Generate a key pair.
|
||||
// You need a high-quality PRNG for this.
|
||||
curve := elliptic.NewEllipticCurve(elliptic.Secp256r1)
|
||||
k, err := ecdsa.GenerateKey(curve, rand.Reader)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Hash a message.
|
||||
alg := sha512.New()
|
||||
_, _ = alg.Write([]byte("I am a potato."))
|
||||
hash := alg.Sum(nil)
|
||||
|
||||
// Sign the message. You don't need a PRNG for this.
|
||||
|
||||
r, s, err := SignECDSA(curve, k.D.Bytes(), hash, sha512.New)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if !ecdsa.Verify(&k.PublicKey, hash, r, s) {
|
||||
fmt.Println("Invalid signature!")
|
||||
}
|
||||
}
|
||||
|
||||
// Generates a 1024-bit DSA key, uses SHA-1 to sign a message, then verifies it.
|
||||
func ExampleSignDSA() {
|
||||
// Here I'm generating some DSA params, but you should really pre-generate
|
||||
// these and re-use them, since this takes a long time and isn't necessary.
|
||||
k := new(dsa.PrivateKey)
|
||||
dsa.GenerateParameters(&k.Parameters, rand.Reader, dsa.L1024N160)
|
||||
|
||||
// Generate a key pair.
|
||||
// You need a high-quality PRNG for this.
|
||||
err := dsa.GenerateKey(k, rand.Reader)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Hash a message.
|
||||
alg := sha1.New()
|
||||
_, _ = alg.Write([]byte("I am a potato."))
|
||||
hash := alg.Sum(nil)
|
||||
|
||||
// Sign the message. You don't need a PRNG for this.
|
||||
r, s, err := SignDSA(k, hash, sha1.New)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if !dsa.Verify(&k.PublicKey, hash, r, s) {
|
||||
fmt.Println("Invalid signature!")
|
||||
}
|
||||
|
||||
}
|
119
_pkg.dev/crypto/rfc6979/rfc6979.go
Executable file
|
@ -0,0 +1,119 @@
|
|||
/*
|
||||
Package rfc6979 is an implementation of RFC 6979's deterministic DSA.
|
||||
|
||||
Such signatures are compatible with standard Digital Signature Algorithm
|
||||
(DSA) and Elliptic Curve Digital Signature Algorithm (ECDSA) digital
|
||||
signatures and can be processed with unmodified verifiers, which need not be
|
||||
aware of the procedure described therein. Deterministic signatures retain
|
||||
the cryptographic security features associated with digital signatures but
|
||||
can be more easily implemented in various environments, since they do not
|
||||
need access to a source of high-quality randomness.
|
||||
|
||||
(https://tools.ietf.org/html/rfc6979)
|
||||
|
||||
Provides functions similar to crypto/dsa and crypto/ecdsa.
|
||||
*/
|
||||
package rfc6979
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"hash"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// mac returns an HMAC of the given key and message.
|
||||
func mac(alg func() hash.Hash, k, m, buf []byte) []byte {
|
||||
h := hmac.New(alg, k)
|
||||
h.Write(m)
|
||||
return h.Sum(buf[:0])
|
||||
}
|
||||
|
||||
// https://tools.ietf.org/html/rfc6979#section-2.3.2
|
||||
func bits2int(in []byte, qlen int) *big.Int {
|
||||
vlen := len(in) * 8
|
||||
v := new(big.Int).SetBytes(in)
|
||||
if vlen > qlen {
|
||||
v = new(big.Int).Rsh(v, uint(vlen-qlen))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// https://tools.ietf.org/html/rfc6979#section-2.3.3
|
||||
func int2octets(v *big.Int, rolen int) []byte {
|
||||
out := v.Bytes()
|
||||
|
||||
// pad with zeros if it's too short
|
||||
if len(out) < rolen {
|
||||
out2 := make([]byte, rolen)
|
||||
copy(out2[rolen-len(out):], out)
|
||||
return out2
|
||||
}
|
||||
|
||||
// drop most significant bytes if it's too long
|
||||
if len(out) > rolen {
|
||||
out2 := make([]byte, rolen)
|
||||
copy(out2, out[len(out)-rolen:])
|
||||
return out2
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// https://tools.ietf.org/html/rfc6979#section-2.3.4
|
||||
func bits2octets(in []byte, q *big.Int, qlen, rolen int) []byte {
|
||||
z1 := bits2int(in, qlen)
|
||||
z2 := new(big.Int).Sub(z1, q)
|
||||
if z2.Sign() < 0 {
|
||||
return int2octets(z1, rolen)
|
||||
}
|
||||
return int2octets(z2, rolen)
|
||||
}
|
||||
|
||||
var one = big.NewInt(1)
|
||||
|
||||
// https://tools.ietf.org/html/rfc6979#section-3.2
|
||||
func generateSecret(q, x *big.Int, alg func() hash.Hash, hash []byte, test func(*big.Int) bool) {
|
||||
qlen := q.BitLen()
|
||||
holen := alg().Size()
|
||||
rolen := (qlen + 7) >> 3
|
||||
bx := append(int2octets(x, rolen), bits2octets(hash, q, qlen, rolen)...)
|
||||
|
||||
// Step B
|
||||
v := bytes.Repeat([]byte{0x01}, holen)
|
||||
|
||||
// Step C
|
||||
k := bytes.Repeat([]byte{0x00}, holen)
|
||||
|
||||
// Step D
|
||||
k = mac(alg, k, append(append(v, 0x00), bx...), k)
|
||||
|
||||
// Step E
|
||||
v = mac(alg, k, v, v)
|
||||
|
||||
// Step F
|
||||
k = mac(alg, k, append(append(v, 0x01), bx...), k)
|
||||
|
||||
// Step G
|
||||
v = mac(alg, k, v, v)
|
||||
|
||||
// Step H
|
||||
for {
|
||||
// Step H1
|
||||
var t []byte
|
||||
|
||||
// Step H2
|
||||
for len(t) < qlen/8 {
|
||||
v = mac(alg, k, v, v)
|
||||
t = append(t, v...)
|
||||
}
|
||||
|
||||
// Step H3
|
||||
secret := bits2int(t, qlen)
|
||||
if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 && test(secret) {
|
||||
return
|
||||
}
|
||||
k = mac(alg, k, append(v, 0x00), k)
|
||||
v = mac(alg, k, v, v)
|
||||
}
|
||||
}
|
28
_pkg.dev/crypto/rfc6979/rfc6979_test.go
Executable file
|
@ -0,0 +1,28 @@
|
|||
package rfc6979
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// https://tools.ietf.org/html/rfc6979#appendix-A.1
|
||||
func TestGenerateSecret(t *testing.T) {
|
||||
q, _ := new(big.Int).SetString("4000000000000000000020108A2E0CC0D99F8A5EF", 16)
|
||||
|
||||
x, _ := new(big.Int).SetString("09A4D6792295A7F730FC3F2B49CBC0F62E862272F", 16)
|
||||
|
||||
hash, _ := hex.DecodeString("AF2BDBE1AA9B6EC1E2ADE1D694F41FC71A831D0268E9891562113D8A62ADD1BF")
|
||||
|
||||
expected, _ := new(big.Int).SetString("23AF4074C90A02B3FE61D286D5C87F425E6BDD81B", 16)
|
||||
var actual *big.Int
|
||||
generateSecret(q, x, sha256.New, hash, func(k *big.Int) bool {
|
||||
actual = k
|
||||
return true
|
||||
})
|
||||
|
||||
if actual.Cmp(expected) != 0 {
|
||||
t.Errorf("Expected %x, got %x", expected, actual)
|
||||
}
|
||||
}
|
114
_pkg.dev/database/leveldb.go
Normal file
|
@ -0,0 +1,114 @@
|
|||
package database
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
ldbutil "github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// DbDir is the folder under which all database files will be put
|
||||
// Structure /DbDir/net
|
||||
const DbDir = "db/"
|
||||
|
||||
// LDB represents a leveldb object
|
||||
type LDB struct {
|
||||
db *leveldb.DB
|
||||
Path string
|
||||
}
|
||||
|
||||
// ErrNotFound means that the value was not found in the db
|
||||
var ErrNotFound = errors.New("value not found for that key")
|
||||
|
||||
// Database contains all methods needed for an object to be a database
|
||||
type Database interface {
|
||||
// Has checks whether the key is in the database
|
||||
Has(key []byte) (bool, error)
|
||||
// Put adds the key value pair into the database
|
||||
Put(key []byte, value []byte) error
|
||||
// Get returns the value for the given key
|
||||
Get(key []byte) ([]byte, error)
|
||||
// Delete deletes the given value for the key from the database
|
||||
Delete(key []byte) error
|
||||
//Prefix returns all values that start with key
|
||||
Prefix(key []byte) ([][]byte, error)
|
||||
// Close closes the underlying db object
|
||||
Close() error
|
||||
}
|
||||
|
||||
// New will return a new leveldb instance
|
||||
func New(path string) (*LDB, error) {
|
||||
dbPath := DbDir + path
|
||||
db, err := leveldb.OpenFile(dbPath, nil)
|
||||
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		// try to recover a corrupted database before giving up
		db, err = leveldb.RecoverFile(dbPath, nil)
	}
	if err != nil {
		return nil, err
	}
|
||||
|
||||
return &LDB{
|
||||
db,
|
||||
dbPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Has implements the database interface
|
||||
func (l *LDB) Has(key []byte) (bool, error) {
|
||||
return l.db.Has(key, nil)
|
||||
}
|
||||
|
||||
// Put implements the database interface
|
||||
func (l *LDB) Put(key []byte, value []byte) error {
|
||||
return l.db.Put(key, value, nil)
|
||||
}
|
||||
|
||||
// Get implements the database interface
|
||||
func (l *LDB) Get(key []byte) ([]byte, error) {
|
||||
val, err := l.db.Get(key, nil)
|
||||
if err == nil {
|
||||
return val, nil
|
||||
}
|
||||
if err == leveldb.ErrNotFound {
|
||||
return val, ErrNotFound
|
||||
}
|
||||
return val, err
|
||||
|
||||
}
|
||||
|
||||
// Delete implements the database interface
|
||||
func (l *LDB) Delete(key []byte) error {
|
||||
return l.db.Delete(key, nil)
|
||||
}
|
||||
|
||||
// Close implements the database interface
|
||||
func (l *LDB) Close() error {
|
||||
return l.db.Close()
|
||||
}
|
||||
|
||||
// Prefix implements the database interface
|
||||
func (l *LDB) Prefix(key []byte) ([][]byte, error) {
|
||||
|
||||
var results [][]byte
|
||||
|
||||
iter := l.db.NewIterator(ldbutil.BytesPrefix(key), nil)
|
||||
for iter.Next() {
|
||||
|
||||
value := iter.Value()
|
||||
|
||||
// Copy the data, as we cannot modify it
|
||||
// Once the iter has been released
|
||||
deref := make([]byte, len(value))
|
||||
|
||||
copy(deref, value)
|
||||
|
||||
// Append result
|
||||
results = append(results, deref)
|
||||
|
||||
}
|
||||
iter.Release()
|
||||
err := iter.Error()
|
||||
return results, err
|
||||
}
|
91
_pkg.dev/database/leveldb_test.go
Normal file
|
@ -0,0 +1,91 @@
|
|||
package database_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const path = "temp"
|
||||
|
||||
func cleanup(db *database.LDB) {
|
||||
db.Close()
|
||||
os.RemoveAll(database.DbDir)
|
||||
}
|
||||
func TestDBCreate(t *testing.T) {
|
||||
|
||||
db, err := database.New(path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.NotEqual(t, nil, db)
|
||||
cleanup(db)
|
||||
}
|
||||
func TestPutGet(t *testing.T) {
|
||||
|
||||
db, err := database.New(path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
key := []byte("Hello")
|
||||
value := []byte("World")
|
||||
|
||||
err = db.Put(key, value)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := db.Get(key)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, value, res)
|
||||
cleanup(db)
|
||||
}
|
||||
func TestPutDelete(t *testing.T) {
|
||||
|
||||
db, err := database.New(path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
key := []byte("Hello")
|
||||
value := []byte("World")
|
||||
|
||||
err = db.Put(key, value)
|
||||
|
||||
err = db.Delete(key)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := db.Get(key)
|
||||
|
||||
assert.Equal(t, database.ErrNotFound, err)
|
||||
assert.Equal(t, res, []byte{})
|
||||
cleanup(db)
|
||||
}
|
||||
|
||||
func TestHas(t *testing.T) {
|
||||
|
||||
db, err := database.New(path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
res, err := db.Has([]byte("NotExist"))
|
||||
assert.Equal(t, res, false)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
key := []byte("Hello")
|
||||
value := []byte("World")
|
||||
|
||||
err = db.Put(key, value)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err = db.Has(key)
|
||||
assert.Equal(t, res, true)
|
||||
assert.Equal(t, err, nil)
|
||||
cleanup(db)
|
||||
|
||||
}
|
||||
func TestDBClose(t *testing.T) {
|
||||
|
||||
db, err := database.New(path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = db.Close()
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
cleanup(db)
|
||||
}
|
50
_pkg.dev/database/table.go
Normal file
|
@ -0,0 +1,50 @@
|
|||
package database
|
||||
|
||||
//Table is an abstract data structure built on top of a db
|
||||
type Table struct {
|
||||
prefix []byte
|
||||
db Database
|
||||
}
|
||||
|
||||
//NewTable creates a new table on the given database
|
||||
func NewTable(db Database, prefix []byte) *Table {
|
||||
return &Table{
|
||||
prefix,
|
||||
db,
|
||||
}
|
||||
}
|
||||
|
||||
// Has implements the database interface
|
||||
func (t *Table) Has(key []byte) (bool, error) {
|
||||
prefixedKey := append(t.prefix, key...)
|
||||
return t.db.Has(prefixedKey)
|
||||
}
|
||||
|
||||
// Put implements the database interface
|
||||
func (t *Table) Put(key []byte, value []byte) error {
|
||||
prefixedKey := append(t.prefix, key...)
|
||||
return t.db.Put(prefixedKey, value)
|
||||
}
|
||||
|
||||
// Get implements the database interface
|
||||
func (t *Table) Get(key []byte) ([]byte, error) {
|
||||
prefixedKey := append(t.prefix, key...)
|
||||
return t.db.Get(prefixedKey)
|
||||
}
|
||||
|
||||
// Delete implements the database interface
|
||||
func (t *Table) Delete(key []byte) error {
|
||||
prefixedKey := append(t.prefix, key...)
|
||||
return t.db.Delete(prefixedKey)
|
||||
}
|
||||
|
||||
// Close implements the database interface
|
||||
func (t *Table) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prefix implements the database interface
|
||||
func (t *Table) Prefix(key []byte) ([][]byte, error) {
|
||||
prefixedKey := append(t.prefix, key...)
|
||||
return t.db.Prefix(prefixedKey)
|
||||
}
|
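The Table type above simply namespaces keys on top of whatever Database implementation it wraps. A minimal usage sketch, assuming the LDB implementation shown earlier and an illustrative "header" prefix (the prefix and key values here are made up for the example):

db, err := database.New("chain")
if err != nil {
	// handle the error
}
// every key written through this table is transparently prefixed with "header"
headers := database.NewTable(db, []byte("header"))
_ = headers.Put([]byte("deadbeef"), []byte{0x01})
val, _ := headers.Get([]byte("deadbeef")) // reads back the same prefixed key
_ = val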
31
_pkg.dev/peer/config.go
Normal file
|
@ -0,0 +1,31 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
// LocalConfig specifies the properties that should be available for each remote peer
|
||||
type LocalConfig struct {
|
||||
Net protocol.Magic
|
||||
UserAgent string
|
||||
Services protocol.ServiceFlag
|
||||
Nonce uint32
|
||||
ProtocolVer protocol.Version
|
||||
Relay bool
|
||||
Port uint16
|
||||
|
||||
// pointer to config will keep the startheight updated
|
||||
StartHeight func() uint32
|
||||
|
||||
// Response Handlers
|
||||
OnHeader func(*Peer, *payload.HeadersMessage)
|
||||
OnGetHeaders func(*Peer, *payload.GetHeadersMessage)
|
||||
OnAddr func(*Peer, *payload.AddrMessage)
|
||||
OnGetAddr func(*Peer, *payload.GetAddrMessage)
|
||||
OnInv func(*Peer, *payload.InvMessage)
|
||||
OnGetData func(*Peer, *payload.GetDataMessage)
|
||||
OnBlock func(*Peer, *payload.BlockMessage)
|
||||
OnGetBlocks func(*Peer, *payload.GetBlocksMessage)
|
||||
OnTx func(*Peer, *payload.TXMessage)
|
||||
}
|
340
_pkg.dev/peer/peer.go
Normal file
|
@ -0,0 +1,340 @@
|
|||
// This impl uses channels to simulate the queue handler with the actor model.
|
||||
// A suitable number k should be set for the channel size, because if #numOfMsg > k,
|
||||
// we lose determinism. The chosen k should be large enough that, when filled, it indicates that
|
||||
// the peer has stopped responding. Since we do not have a pingMSG, we will need another way to shut down
|
||||
// peers
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/command"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peer/stall"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
const (
|
||||
maxOutboundConnections = 100
|
||||
protocolVer = protocol.DefaultVersion
|
||||
handshakeTimeout = 30 * time.Second
|
||||
idleTimeout = 5 * time.Minute // If no message received after idleTimeout, then peer disconnects
|
||||
|
||||
// nodes will have `responseTime` seconds to reply with a response
|
||||
responseTime = 120 * time.Second
|
||||
|
||||
// the stall detector will check every `tickerInterval` to see if messages
|
||||
// are overdue. Should be less than `responseTime`
|
||||
tickerInterval = 30 * time.Second
|
||||
|
||||
// The input buffer size is the amount of mesages that
|
||||
// can be buffered into the channel to receive at once before
|
||||
// blocking, and before determinism is broken
|
||||
inputBufferSize = 100
|
||||
|
||||
// The output buffer size is the amount of messages that
|
||||
// can be buffered into the channel to send at once before
|
||||
// blocking, and before determinism is broken.
|
||||
outputBufferSize = 100
|
||||
|
||||
// pingInterval = 20 * time.Second //Not implemented in neo clients
|
||||
)
|
||||
|
||||
var (
|
||||
errHandShakeTimeout = errors.New("handshake timed out, peers have " + handshakeTimeout.String() + " to complete the handshake")
|
||||
)
|
||||
|
||||
// Peer represents a peer on the neo network
|
||||
type Peer struct {
|
||||
config LocalConfig
|
||||
conn net.Conn
|
||||
|
||||
startHeight uint32
|
||||
|
||||
// atomic vals
|
||||
disconnected int32
|
||||
|
||||
//unchangeable state: concurrent safe
|
||||
addr string
|
||||
protoVer protocol.Version
|
||||
port uint16
|
||||
inbound bool
|
||||
userAgent string
|
||||
services protocol.ServiceFlag
|
||||
createdAt time.Time
|
||||
relay bool
|
||||
|
||||
statemutex sync.Mutex
|
||||
verackReceived bool
|
||||
versionKnown bool
|
||||
|
||||
*stall.Detector
|
||||
|
||||
inch chan func() // will handle all incoming messages from the peer
|
||||
outch chan func() // will handle all outgoing messages to the peer
|
||||
quitch chan struct{}
|
||||
}
|
||||
|
||||
// NewPeer returns a new NEO peer
|
||||
func NewPeer(con net.Conn, inbound bool, cfg LocalConfig) *Peer {
|
||||
return &Peer{
|
||||
inch: make(chan func(), inputBufferSize),
|
||||
outch: make(chan func(), outputBufferSize),
|
||||
quitch: make(chan struct{}, 1),
|
||||
inbound: inbound,
|
||||
config: cfg,
|
||||
conn: con,
|
||||
createdAt: time.Now(),
|
||||
startHeight: 0,
|
||||
addr: con.RemoteAddr().String(),
|
||||
Detector: stall.NewDetector(responseTime, tickerInterval),
|
||||
}
|
||||
}
|
||||
|
||||
// Write to a peer
|
||||
func (p *Peer) Write(msg wire.Messager) error {
|
||||
return wire.WriteMessage(p.conn, p.config.Net, msg)
|
||||
}
|
||||
|
||||
// Read to a peer
|
||||
func (p *Peer) Read() (wire.Messager, error) {
|
||||
return wire.ReadMessage(p.conn, p.config.Net)
|
||||
}
|
||||
|
||||
// Disconnect disconnects a peer and closes the connection
|
||||
func (p *Peer) Disconnect() {
|
||||
|
||||
// return if already disconnected
|
||||
if atomic.LoadInt32(&p.disconnected) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt32(&p.disconnected, 1)
|
||||
|
||||
p.Detector.Quit()
|
||||
close(p.quitch)
|
||||
p.conn.Close()
|
||||
|
||||
fmt.Println("Disconnected Peer with address", p.RemoteAddr().String())
|
||||
}
|
||||
|
||||
// Port returns the peers port
|
||||
func (p *Peer) Port() uint16 {
|
||||
return p.port
|
||||
}
|
||||
|
||||
// CreatedAt returns the time at which the connection was made
|
||||
func (p *Peer) CreatedAt() time.Time {
|
||||
return p.createdAt
|
||||
}
|
||||
|
||||
// Height returns the latest recorded height of this peer
|
||||
func (p *Peer) Height() uint32 {
|
||||
return p.startHeight
|
||||
}
|
||||
|
||||
// CanRelay returns true, if the peer can relay information
|
||||
func (p *Peer) CanRelay() bool {
|
||||
return p.relay
|
||||
}
|
||||
|
||||
// LocalAddr returns this node's local address
|
||||
func (p *Peer) LocalAddr() net.Addr {
|
||||
return p.conn.LocalAddr()
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address of the connected peer
|
||||
func (p *Peer) RemoteAddr() net.Addr {
|
||||
return p.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
// Services returns the services offered by the peer
|
||||
func (p *Peer) Services() protocol.ServiceFlag {
|
||||
return p.config.Services
|
||||
}
|
||||
|
||||
// Inbound returns true if this peer is an inbound peer
|
||||
func (p *Peer) Inbound() bool {
|
||||
return p.inbound
|
||||
}
|
||||
|
||||
// IsVerackReceived returns true, if this node has
|
||||
// received a verack from this peer
|
||||
func (p *Peer) IsVerackReceived() bool {
|
||||
return p.verackReceived
|
||||
}
|
||||
|
||||
//NotifyDisconnect returns once the peer has disconnected
|
||||
// Blocking
|
||||
func (p *Peer) NotifyDisconnect() {
|
||||
<-p.quitch
|
||||
fmt.Println("Peer has just disconnected")
|
||||
}
|
||||
|
||||
//End of Exposed API functions//
|
||||
|
||||
// PingLoop not impl. in neo yet, adding it now
|
||||
// will cause this client to disconnect from all other implementations
|
||||
func (p *Peer) PingLoop() { /*not implemented in other neo clients*/ }
|
||||
|
||||
// Run is used to start communicating with the peer
|
||||
// completes the handshake and starts observing
|
||||
// for messages coming in
|
||||
func (p *Peer) Run() error {
|
||||
|
||||
err := p.Handshake()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go p.StartProtocol()
|
||||
go p.ReadLoop()
|
||||
go p.WriteLoop()
|
||||
|
||||
//go p.PingLoop() // since it is not implemented. It will disconnect all other impls.
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartProtocol run as a go-routine, will act as our queue for messages
|
||||
// should be ran after handshake
|
||||
func (p *Peer) StartProtocol() {
|
||||
loop:
|
||||
for atomic.LoadInt32(&p.disconnected) == 0 {
|
||||
select {
|
||||
case f := <-p.inch:
|
||||
f()
|
||||
case <-p.quitch:
|
||||
break loop
|
||||
case <-p.Detector.Quitch:
|
||||
fmt.Println("Peer stalled, disconnecting")
|
||||
break loop
|
||||
}
|
||||
}
|
||||
p.Disconnect()
|
||||
}
|
||||
|
||||
// ReadLoop Will block on the read until a message is read
|
||||
// Should only be called after handshake is complete
|
||||
// on a separate go-routine.
|
||||
func (p *Peer) ReadLoop() {
|
||||
|
||||
idleTimer := time.AfterFunc(idleTimeout, func() {
|
||||
fmt.Println("Timing out peer")
|
||||
p.Disconnect()
|
||||
})
|
||||
|
||||
loop:
|
||||
for atomic.LoadInt32(&p.disconnected) == 0 {
|
||||
|
||||
idleTimer.Reset(idleTimeout) // reset timer on each loop
|
||||
|
||||
readmsg, err := p.Read()
|
||||
|
||||
// Message read; stop Timer
|
||||
idleTimer.Stop()
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Err on read", err) // This will also happen if Peer is disconnected
|
||||
break loop
|
||||
}
|
||||
|
||||
// Remove message as pending from the stall detector
|
||||
p.Detector.RemoveMessage(readmsg.Command())
|
||||
|
||||
switch msg := readmsg.(type) {
|
||||
|
||||
case *payload.VersionMessage:
|
||||
fmt.Println("Already received a Version, disconnecting. " + p.RemoteAddr().String())
|
||||
break loop // We have already done the handshake, break loop and disconnect
|
||||
case *payload.VerackMessage:
|
||||
if p.verackReceived {
|
||||
fmt.Println("Already received a Verack, disconnecting. " + p.RemoteAddr().String())
|
||||
break loop
|
||||
}
|
||||
p.statemutex.Lock() // This should not happen, however if it does, then we should set it.
|
||||
p.verackReceived = true
|
||||
p.statemutex.Unlock()
|
||||
case *payload.AddrMessage:
|
||||
p.OnAddr(msg)
|
||||
case *payload.GetAddrMessage:
|
||||
p.OnGetAddr(msg)
|
||||
case *payload.GetBlocksMessage:
|
||||
p.OnGetBlocks(msg)
|
||||
case *payload.BlockMessage:
|
||||
p.OnBlocks(msg)
|
||||
case *payload.HeadersMessage:
|
||||
p.OnHeaders(msg)
|
||||
case *payload.GetHeadersMessage:
|
||||
p.OnGetHeaders(msg)
|
||||
case *payload.InvMessage:
|
||||
p.OnInv(msg)
|
||||
case *payload.GetDataMessage:
|
||||
p.OnGetData(msg)
|
||||
case *payload.TXMessage:
|
||||
p.OnTX(msg)
|
||||
default:
|
||||
fmt.Println("Cannot recognise message", msg.Command()) //Do not disconnect peer, just Log Message
|
||||
}
|
||||
}
|
||||
|
||||
idleTimer.Stop()
|
||||
p.Disconnect()
|
||||
}
|
||||
|
||||
// WriteLoop will Queue all messages to be written to the peer.
|
||||
func (p *Peer) WriteLoop() {
|
||||
for atomic.LoadInt32(&p.disconnected) == 0 {
|
||||
select {
|
||||
case f := <-p.outch:
|
||||
f()
|
||||
case <-p.Detector.Quitch: // if the detector quits, disconnect peer
|
||||
p.Disconnect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Outgoing Requests
|
||||
|
||||
// RequestHeaders will write a getheaders to this peer
|
||||
func (p *Peer) RequestHeaders(hash util.Uint256) error {
|
||||
c := make(chan error, 0)
|
||||
p.outch <- func() {
|
||||
		getHeaders, err := payload.NewGetHeadersMessage([]util.Uint256{hash}, util.Uint256{})
		if err != nil {
			c <- err
			return
		}
		err = p.Write(getHeaders)
		if err == nil {
			// the request was sent, so expect a headers response within the stall deadline
			p.Detector.AddMessage(command.GetHeaders)
		}
|
||||
c <- err
|
||||
}
|
||||
return <-c
|
||||
}
|
||||
|
||||
// RequestBlocks will ask this peer for a set of blocks
|
||||
func (p *Peer) RequestBlocks(hashes []util.Uint256) error {
|
||||
c := make(chan error, 0)
|
||||
|
||||
p.outch <- func() {
|
||||
getdata, err := payload.NewGetDataMessage(payload.InvTypeBlock)
|
||||
err = getdata.AddHashes(hashes)
|
||||
if err != nil {
|
||||
c <- err
|
||||
return
|
||||
}
|
||||
|
||||
		err = p.Write(getdata)
		if err == nil {
			// the request was sent, so expect block/tx responses within the stall deadline
			p.Detector.AddMessage(command.GetData)
		}
|
||||
|
||||
c <- err
|
||||
}
|
||||
return <-c
|
||||
}
|
196
_pkg.dev/peer/peer_test.go
Normal file
|
@ -0,0 +1,196 @@
|
|||
package peer_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peer"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func returnConfig() peer.LocalConfig {
|
||||
|
||||
DefaultHeight := func() uint32 {
|
||||
return 10
|
||||
}
|
||||
|
||||
OnAddr := func(p *peer.Peer, msg *payload.AddrMessage) {}
|
||||
OnHeader := func(p *peer.Peer, msg *payload.HeadersMessage) {}
|
||||
OnGetHeaders := func(p *peer.Peer, msg *payload.GetHeadersMessage) {}
|
||||
OnInv := func(p *peer.Peer, msg *payload.InvMessage) {}
|
||||
OnGetData := func(p *peer.Peer, msg *payload.GetDataMessage) {}
|
||||
OnBlock := func(p *peer.Peer, msg *payload.BlockMessage) {}
|
||||
OnGetBlocks := func(p *peer.Peer, msg *payload.GetBlocksMessage) {}
|
||||
|
||||
return peer.LocalConfig{
|
||||
Net: protocol.MainNet,
|
||||
UserAgent: "NEO-GO-Default",
|
||||
Services: protocol.NodePeerService,
|
||||
Nonce: 1200,
|
||||
ProtocolVer: 0,
|
||||
Relay: false,
|
||||
Port: 10332,
|
||||
// pointer to config will keep the startheight updated for each version
|
||||
//Message we plan to send
|
||||
StartHeight: DefaultHeight,
|
||||
OnHeader: OnHeader,
|
||||
OnAddr: OnAddr,
|
||||
OnGetHeaders: OnGetHeaders,
|
||||
OnInv: OnInv,
|
||||
OnGetData: OnGetData,
|
||||
OnBlock: OnBlock,
|
||||
OnGetBlocks: OnGetBlocks,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandshake(t *testing.T) {
|
||||
address := ":20338"
|
||||
go func() {
|
||||
|
||||
conn, err := net.DialTimeout("tcp", address, 2*time.Second)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p := peer.NewPeer(conn, true, returnConfig())
|
||||
err = p.Run()
|
||||
verack, err := payload.NewVerackMessage()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
if err := p.Write(verack); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, true, p.IsVerackReceived())
|
||||
|
||||
}()
|
||||
|
||||
listener, err := net.Listen("tcp", address)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
listener.Close()
|
||||
}()
|
||||
|
||||
for {
|
||||
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("82.2.97.142"), Port: 20338}
|
||||
nonce := uint32(100)
|
||||
messageVer, err := payload.NewVersionMessage(tcpAddrMe, 2595770, false, protocol.DefaultVersion, protocol.UserAgent, nonce, protocol.NodePeerService)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := wire.WriteMessage(conn, protocol.MainNet, messageVer); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
readmsg, err := wire.ReadMessage(conn, protocol.MainNet)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
version, ok := readmsg.(*payload.VersionMessage)
|
||||
if !ok {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nil, version)
|
||||
|
||||
messageVrck, err := payload.NewVerackMessage()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nil, messageVrck)
|
||||
|
||||
if err := wire.WriteMessage(conn, protocol.MainNet, messageVrck); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
readmsg, err = wire.ReadMessage(conn, protocol.MainNet)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nil, readmsg)
|
||||
|
||||
verk, ok := readmsg.(*payload.VerackMessage)
|
||||
if !ok {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NotEqual(t, nil, verk)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestConfigurations(t *testing.T) {
|
||||
_, conn := net.Pipe()
|
||||
|
||||
inbound := true
|
||||
|
||||
config := returnConfig()
|
||||
|
||||
p := peer.NewPeer(conn, inbound, config)
|
||||
|
||||
// test inbound
|
||||
assert.Equal(t, inbound, p.Inbound())
|
||||
|
||||
// handshake not done, should be false
|
||||
assert.Equal(t, false, p.IsVerackReceived())
|
||||
|
||||
assert.Equal(t, config.Services, p.Services())
|
||||
|
||||
assert.Equal(t, config.Relay, p.CanRelay())
|
||||
|
||||
assert.WithinDuration(t, time.Now(), p.CreatedAt(), 1*time.Second)
|
||||
}
|
||||
|
||||
func TestPeerDisconnect(t *testing.T) {
|
||||
// Make sure everything is shutdown
|
||||
// Make sure timer is shutdown in stall detector too. Should maybe put this part of test into stall detector.
|
||||
|
||||
_, conn := net.Pipe()
|
||||
inbound := true
|
||||
config := returnConfig()
|
||||
p := peer.NewPeer(conn, inbound, config)
|
||||
|
||||
p.Disconnect()
|
||||
verack, err := payload.NewVerackMessage()
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = p.Write(verack)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
// Check if stall detector is still running
|
||||
_, ok := <-p.Detector.Quitch
|
||||
assert.Equal(t, ok, false)
|
||||
}
|
||||
|
||||
func TestNotifyDisconnect(t *testing.T) {
|
||||
|
||||
_, conn := net.Pipe()
|
||||
inbound := true
|
||||
config := returnConfig()
|
||||
p := peer.NewPeer(conn, inbound, config)
|
||||
|
||||
p.Disconnect()
|
||||
p.NotifyDisconnect()
|
||||
// TestNotify uses default test timeout as the passing condition
|
||||
// Failure condition can be seen when you comment out p.Disconnect()
|
||||
}
|
132
_pkg.dev/peer/peerhandshake.go
Normal file
|
@ -0,0 +1,132 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
iputils "github.com/CityOfZion/neo-go/pkg/wire/util/ip"
|
||||
)
|
||||
|
||||
// Handshake will initiate a handshake with this peer
|
||||
func (p *Peer) Handshake() error {
|
||||
|
||||
handshakeErr := make(chan error, 1)
|
||||
go func() {
|
||||
if p.inbound {
|
||||
handshakeErr <- p.inboundHandShake()
|
||||
} else {
|
||||
handshakeErr <- p.outboundHandShake()
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-handshakeErr:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case <-time.After(handshakeTimeout):
|
||||
return errHandShakeTimeout
|
||||
}
|
||||
|
||||
// This is purely here for Logs
|
||||
if p.inbound {
|
||||
fmt.Println("inbound handshake with", p.RemoteAddr().String(), "successful")
|
||||
} else {
|
||||
|
||||
fmt.Println("outbound handshake with", p.RemoteAddr().String(), "successful")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If this peer has an inbound conn (conn that is going into another peer)
|
||||
// then it has dialed, and so we must read the version message
|
||||
func (p *Peer) inboundHandShake() error {
|
||||
var err error
|
||||
if err := p.writeLocalVersionMSG(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readRemoteVersionMSG(); err != nil {
|
||||
return err
|
||||
}
|
||||
verack, err := payload.NewVerackMessage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
	if err = p.Write(verack); err != nil {
		return err
	}
	return p.readVerack()
|
||||
}
|
||||
func (p *Peer) outboundHandShake() error {
|
||||
var err error
|
||||
err = p.readRemoteVersionMSG()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.writeLocalVersionMSG()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.readVerack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
verack, err := payload.NewVerackMessage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Write(verack)
|
||||
}
|
||||
func (p *Peer) writeLocalVersionMSG() error {
|
||||
|
||||
nonce := p.config.Nonce
|
||||
relay := p.config.Relay
|
||||
port := int(p.config.Port)
|
||||
ua := p.config.UserAgent
|
||||
sh := p.config.StartHeight()
|
||||
services := p.config.Services
|
||||
proto := p.config.ProtocolVer
|
||||
ip := iputils.GetLocalIP()
|
||||
tcpAddrMe := &net.TCPAddr{IP: ip, Port: port}
|
||||
|
||||
messageVer, err := payload.NewVersionMessage(tcpAddrMe, sh, relay, proto, ua, nonce, services)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Write(messageVer)
|
||||
}
|
||||
|
||||
func (p *Peer) readRemoteVersionMSG() error {
|
||||
readmsg, err := wire.ReadMessage(p.conn, p.config.Net)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
version, ok := readmsg.(*payload.VersionMessage)
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
return p.OnVersion(version)
|
||||
}
|
||||
|
||||
func (p *Peer) readVerack() error {
|
||||
readmsg, err := wire.ReadMessage(p.conn, p.config.Net)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, ok := readmsg.(*payload.VerackMessage)
|
||||
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
// should only be accessed on one go-routine
|
||||
p.verackReceived = true
|
||||
|
||||
return nil
|
||||
}
|
67
_pkg.dev/peer/readme.md
Normal file
|
@ -0,0 +1,67 @@
|
|||
# Package - Peer
|
||||
|
||||
|
||||
|
||||
## Responsibility
|
||||
|
||||
Once a connection has been made, it represents an established peer to the localNode. Since a connection and the `Wire` are golang primitives that we cannot do much with on their own, the peer package encapsulates both, while adding extra functionality.
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- The handshake protocol is automatically executed and handled by the peer package. If a Version/Verack is received twice, the peer will be disconnected.
|
||||
|
||||
- IdleTimeouts: If a Message is not received from the peer within a set period of time, the peer will be disconnected.
|
||||
|
||||
- StallTimeouts: For example, if a GetHeaders is sent to the peer and a Headers response is not received within a certain period of time, then the peer is disconnected.
|
||||
|
||||
- Concurrency Model: The concurrency model used is similar to the Actor model, with a few changes. Messages can be sent to a peer asynchronously or synchronously. An example of a synchronous message send is the `RequestHeaders` method, where the channel blocks until an error value is received. The `OnHeaders` message is, however, called asynchronously. Furthermore, all methods passed through the config are wrapped inside an additional `Peers` method; this lays the groundwork for capturing statistics regarding a specific command. These wrappers are also used so that we can pass behaviour to be executed down the channel (a minimal sketch of this pattern follows this list).
|
||||
|
||||
- Configuration: Each Peer will have a config struct passed to it, with information about the Local Peer and functions that will encapsulate the behaviour of what the peer should do, given a request. This way, the peer is not dependent on any other package.
|
||||
|
||||
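As a rough sketch of the synchronous path described in the Concurrency Model bullet (illustrative only; it mirrors the way `RequestHeaders` queues work on the peer's outgoing channel and blocks on a reply channel, and the helper name here is made up):

    // queue the write on the peer's out-channel and block until it reports back
    func (p *Peer) requestSync(msg wire.Messager) error {
        c := make(chan error, 1)
        p.outch <- func() {
            c <- p.Write(msg)
        }
        return <-c
    }
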
## Usage
|
||||
|
||||
conn, err := net.Dial("tcp", "seed2.neo.org:10333")
|
||||
if err != nil {
|
||||
fmt.Println("Error dialing connection", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
config := peer.LocalConfig{
|
||||
Net: protocol.MainNet,
|
||||
UserAgent: "NEO-G",
|
||||
Services: protocol.NodePeerService,
|
||||
Nonce: 1200,
|
||||
ProtocolVer: 0,
|
||||
Relay: false,
|
||||
Port: 10332,
|
||||
StartHeight: LocalHeight,
|
||||
OnHeader: OnHeader,
|
||||
}
|
||||
|
||||
p := peer.NewPeer(conn, false, config)
|
||||
err = p.Run()
|
||||
|
||||
hash, err := util.Uint256DecodeString(chainparams.GenesisHash)
|
||||
// hash2, err := util.Uint256DecodeString("ff8fe95efc5d1cc3a22b17503aecaf289cef68f94b79ddad6f613569ca2342d8")
|
||||
err = p.RequestHeaders(hash)
|
||||
|
||||
func OnHeader(peer *peer.Peer, msg *payload.HeadersMessage) {
|
||||
// This function is passed to peer
|
||||
// and the peer will execute it on receiving a header
|
||||
}
|
||||
|
||||
func LocalHeight() uint32 {
|
||||
// This will be a function from the object that handles the block heights
|
||||
return 10
|
||||
}
|
||||
|
||||
|
||||
### Notes
|
||||
|
||||
|
||||
Should we follow the actor model for peers? Each peer will have an ID, which we can take as the PID, or if
|
||||
we launch a go-routine for each peer, then we can use that as an implicit PID.
|
||||
|
||||
Peer information should be stored in a database; if no db exists, we should get it from an initial peers file.
|
||||
We can use this to periodically store information about a peer.
|
111
_pkg.dev/peer/responsehandlers.go
Normal file
|
@ -0,0 +1,111 @@
|
|||
package peer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
// OnGetData is called when a GetData message is received
|
||||
func (p *Peer) OnGetData(msg *payload.GetDataMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnGetData != nil {
|
||||
p.config.OnGetData(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//OnTX is called when a TX message is received
|
||||
func (p *Peer) OnTX(msg *payload.TXMessage) {
|
||||
	p.inch <- func() {
		if p.config.OnTx != nil {
			p.config.OnTx(p, msg)
		}
	}
|
||||
}
|
||||
|
||||
// OnInv is called when a Inv message is received
|
||||
func (p *Peer) OnInv(msg *payload.InvMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnInv != nil {
|
||||
p.config.OnInv(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnGetHeaders is called when a GetHeaders message is received
|
||||
func (p *Peer) OnGetHeaders(msg *payload.GetHeadersMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnGetHeaders != nil {
|
||||
p.config.OnGetHeaders(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnAddr is called when a Addr message is received
|
||||
func (p *Peer) OnAddr(msg *payload.AddrMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnAddr != nil {
|
||||
p.config.OnAddr(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnGetAddr is called when a GetAddr message is received
|
||||
func (p *Peer) OnGetAddr(msg *payload.GetAddrMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnGetAddr != nil {
|
||||
p.config.OnGetAddr(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnGetBlocks is called when a GetBlocks message is received
|
||||
func (p *Peer) OnGetBlocks(msg *payload.GetBlocksMessage) {
|
||||
p.inch <- func() {
|
||||
if p.config.OnGetBlocks != nil {
|
||||
p.config.OnGetBlocks(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnBlocks is called when a Blocks message is received
|
||||
func (p *Peer) OnBlocks(msg *payload.BlockMessage) {
|
||||
p.Detector.RemoveMessage(msg.Command())
|
||||
p.inch <- func() {
|
||||
if p.config.OnBlock != nil {
|
||||
p.config.OnBlock(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnHeaders is called when a Headers message is received
|
||||
func (p *Peer) OnHeaders(msg *payload.HeadersMessage) {
|
||||
p.Detector.RemoveMessage(msg.Command())
|
||||
p.inch <- func() {
|
||||
if p.config.OnHeader != nil {
|
||||
p.config.OnHeader(p, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnVersion Listener will be called
|
||||
// during the handshake, any error checking should be done here for the versionMessage.
|
||||
// This should only ever be called during the handshake. Any other place and the peer will disconnect.
|
||||
func (p *Peer) OnVersion(msg *payload.VersionMessage) error {
|
||||
if msg.Nonce == p.config.Nonce {
|
||||
p.conn.Close()
|
||||
return errors.New("self connection, disconnecting Peer")
|
||||
}
|
||||
p.versionKnown = true
|
||||
p.port = msg.Port
|
||||
p.services = msg.Services
|
||||
p.userAgent = string(msg.UserAgent)
|
||||
p.createdAt = time.Now()
|
||||
p.relay = msg.Relay
|
||||
p.startHeight = msg.StartHeight
|
||||
return nil
|
||||
}
|
175
_pkg.dev/peer/stall/stall.go
Normal file
|
@ -0,0 +1,175 @@
|
|||
package stall
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/command"
|
||||
)
|
||||
|
||||
// Detector (stall detector) will keep track of all pendingMessages
|
||||
// If any message takes too long to reply
|
||||
// the detector will disconnect the peer
|
||||
type Detector struct {
|
||||
responseTime time.Duration
|
||||
tickInterval time.Duration
|
||||
|
||||
lock *sync.RWMutex
|
||||
responses map[command.Type]time.Time
|
||||
|
||||
// The detector is embedded into a peer and the peer watches this quit chan
|
||||
// If this chan is closed, the peer disconnects
|
||||
Quitch chan struct{}
|
||||
|
||||
// atomic vals
|
||||
disconnected int32
|
||||
}
|
||||
|
||||
// NewDetector will create a new stall detector
|
||||
// rTime is the responseTime and signals how long
|
||||
// a peer has to reply back to a sent message
|
||||
// tickerInterval is how often the detector will check for stalled messages
|
||||
func NewDetector(rTime time.Duration, tickerInterval time.Duration) *Detector {
|
||||
d := &Detector{
|
||||
responseTime: rTime,
|
||||
tickInterval: tickerInterval,
|
||||
lock: new(sync.RWMutex),
|
||||
responses: map[command.Type]time.Time{},
|
||||
Quitch: make(chan struct{}),
|
||||
}
|
||||
go d.loop()
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *Detector) loop() {
|
||||
ticker := time.NewTicker(d.tickInterval)
|
||||
|
||||
defer func() {
|
||||
d.Quit()
|
||||
d.DeleteAll()
|
||||
ticker.Stop()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
now := time.Now()
|
||||
d.lock.RLock()
|
||||
resp := d.responses
|
||||
d.lock.RUnlock()
|
||||
for _, deadline := range resp {
|
||||
if now.After(deadline) {
|
||||
fmt.Println(resp)
|
||||
fmt.Println("Deadline passed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Quit is a concurrent safe way to call the Quit channel
|
||||
// Without blocking
|
||||
func (d *Detector) Quit() {
|
||||
// return if already disconnected
|
||||
if atomic.LoadInt32(&d.disconnected) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt32(&d.disconnected, 1)
|
||||
close(d.Quitch)
|
||||
}
|
||||
|
||||
//AddMessage will add a message to the responses map
|
||||
// Call this function when we send a message to a peer
|
||||
// The command passed through is the command that we sent
|
||||
// we will then set a timer for the expected message(s)
|
||||
func (d *Detector) AddMessage(cmd command.Type) {
|
||||
cmds := d.addMessage(cmd)
|
||||
d.lock.Lock()
|
||||
for _, cmd := range cmds {
|
||||
d.responses[cmd] = time.Now().Add(d.responseTime)
|
||||
}
|
||||
d.lock.Unlock()
|
||||
}
|
||||
|
||||
// RemoveMessage remove messages from the responses map
|
||||
// Call this function when we receive a message from
|
||||
// peer. This will remove the pendingresponse message from the map.
|
||||
// The command passed through is the command we received
|
||||
func (d *Detector) RemoveMessage(cmd command.Type) {
|
||||
cmds := d.removeMessage(cmd)
|
||||
d.lock.Lock()
|
||||
for _, cmd := range cmds {
|
||||
delete(d.responses, cmd)
|
||||
}
|
||||
d.lock.Unlock()
|
||||
}
|
||||
|
||||
// DeleteAll empties the map of all contents and
|
||||
// is called when the detector is being shut down
|
||||
func (d *Detector) DeleteAll() {
|
||||
d.lock.Lock()
|
||||
d.responses = make(map[command.Type]time.Time)
|
||||
d.lock.Unlock()
|
||||
}
|
||||
|
||||
// GetMessages Will return a map of all of the pendingResponses
|
||||
// and their deadlines
|
||||
func (d *Detector) GetMessages() map[command.Type]time.Time {
|
||||
	resp := make(map[command.Type]time.Time)
	d.lock.RLock()
	// copy the deadlines so callers do not share the detector's internal map
	for cmd, deadline := range d.responses {
		resp[cmd] = deadline
	}
	d.lock.RUnlock()
|
||||
return resp
|
||||
}
|
||||
|
||||
// when a message is added, we will add a deadline for
|
||||
// expected response
|
||||
func (d *Detector) addMessage(cmd command.Type) []command.Type {
|
||||
var cmds []command.Type
|
||||
|
||||
switch cmd {
|
||||
case command.GetHeaders:
|
||||
// We now will expect a Headers Message
|
||||
cmds = append(cmds, command.Headers)
|
||||
case command.GetAddr:
|
||||
// We now will expect an Addr message
|
||||
cmds = append(cmds, command.Addr)
|
||||
case command.GetData:
|
||||
// We will now expect a block/tx message
|
||||
cmds = append(cmds, command.Block)
|
||||
cmds = append(cmds, command.TX)
|
||||
case command.GetBlocks:
|
||||
// we will now expect a inv message
|
||||
cmds = append(cmds, command.Inv)
|
||||
case command.Version:
|
||||
// We will now expect a verack
|
||||
cmds = append(cmds, command.Verack)
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
// if receive a message, we will delete it from pending
|
||||
func (d *Detector) removeMessage(cmd command.Type) []command.Type {
|
||||
var cmds []command.Type
|
||||
|
||||
switch cmd {
|
||||
case command.Block:
|
||||
// We will now remove a block and tx message
|
||||
cmds = append(cmds, command.Block)
|
||||
cmds = append(cmds, command.TX)
|
||||
case command.TX:
|
||||
// We will now remove a block and tx message
|
||||
cmds = append(cmds, command.Block)
|
||||
cmds = append(cmds, command.TX)
|
||||
case command.Verack:
|
||||
// We will now remove the verack
|
||||
cmds = append(cmds, cmd)
|
||||
default:
|
||||
cmds = append(cmds, cmd)
|
||||
}
|
||||
return cmds
|
||||
}
|
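A minimal usage sketch for the stall detector above. The NewDetector(responseTime, tickerInterval) constructor and the command constants are taken from the test file that follows; the import paths are assumptions based on this package layout.

package main

import (
	"time"

	"github.com/CityOfZion/neo-go/pkg/peer/stall" // assumed import path
	"github.com/CityOfZion/neo-go/pkg/wire/command"
)

func main() {
	d := stall.NewDetector(2*time.Second, 1*time.Second)

	// We sent a getaddr, so an addr message is expected within responseTime.
	d.AddMessage(command.GetAddr)

	select {
	case <-d.Quitch:
		// A deadline passed; the caller would disconnect the peer here.
	case <-time.After(time.Second):
		// Pretend the addr arrived in time and clear the pending response.
		d.RemoveMessage(command.Addr)
	}
}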
84
_pkg.dev/peer/stall/stall_test.go
Normal file
84
_pkg.dev/peer/stall/stall_test.go
Normal file
|
@ -0,0 +1,84 @@
|
|||
package stall
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/command"
|
||||
)
|
||||
|
||||
func TestAddRemoveMessage(t *testing.T) {
|
||||
|
||||
responseTime := 2 * time.Millisecond
|
||||
tickerInterval := 1 * time.Millisecond
|
||||
|
||||
d := NewDetector(responseTime, tickerInterval)
|
||||
d.AddMessage(command.GetAddr)
|
||||
mp := d.GetMessages()
|
||||
|
||||
assert.Equal(t, 1, len(mp))
|
||||
assert.IsType(t, time.Time{}, mp[command.GetAddr])
|
||||
|
||||
d.RemoveMessage(command.Addr)
|
||||
mp = d.GetMessages()
|
||||
|
||||
assert.Equal(t, 0, len(mp))
|
||||
assert.Empty(t, mp[command.GetAddr])
|
||||
}
|
||||
|
||||
type mockPeer struct {
|
||||
lock *sync.RWMutex
|
||||
online bool
|
||||
detector *Detector
|
||||
}
|
||||
|
||||
func (mp *mockPeer) loop() {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-mp.detector.Quitch:
|
||||
|
||||
break loop
|
||||
}
|
||||
}
|
||||
// cleanup
|
||||
mp.lock.Lock()
|
||||
mp.online = false
|
||||
mp.lock.Unlock()
|
||||
}
|
||||
func TestDeadlineWorks(t *testing.T) {
|
||||
|
||||
responseTime := 2 * time.Millisecond
|
||||
tickerInterval := 1 * time.Millisecond
|
||||
|
||||
d := NewDetector(responseTime, tickerInterval)
|
||||
mp := mockPeer{online: true, detector: d, lock: new(sync.RWMutex)}
|
||||
go mp.loop()
|
||||
|
||||
d.AddMessage(command.GetAddr)
|
||||
time.Sleep(responseTime + 1*time.Millisecond)
|
||||
|
||||
k := make(map[command.Type]time.Time)
|
||||
d.lock.RLock()
|
||||
assert.Equal(t, k, d.responses)
|
||||
d.lock.RUnlock()
|
||||
mp.lock.RLock()
|
||||
assert.Equal(t, false, mp.online)
|
||||
mp.lock.RUnlock()
|
||||
}
|
||||
func TestDeadlineShouldNotBeEmpty(t *testing.T) {
|
||||
responseTime := 10 * time.Millisecond
|
||||
tickerInterval := 1 * time.Millisecond
|
||||
|
||||
d := NewDetector(responseTime, tickerInterval)
|
||||
d.AddMessage(command.GetAddr)
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
|
||||
k := make(map[command.Type]time.Time)
|
||||
d.lock.RLock()
|
||||
assert.NotEqual(t, k, d.responses)
|
||||
d.lock.RUnlock()
|
||||
}
|
155
_pkg.dev/peermgr/blockcache.go
Normal file
155
_pkg.dev/peermgr/blockcache.go
Normal file
|
@ -0,0 +1,155 @@
|
|||
package peermgr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
var (
|
||||
//ErrCacheLimit is returned when the cache limit is reached
|
||||
ErrCacheLimit = errors.New("no more items can be added to the cache")
|
||||
|
||||
//ErrNoItems is returned when pickItem is called and there are no items in the cache
|
||||
ErrNoItems = errors.New("there are no items in the cache")
|
||||
|
||||
//ErrDuplicateItem is returned when you try to add the same item, more than once to the cache
|
||||
ErrDuplicateItem = errors.New("this item is already in the cache")
|
||||
)
|
||||
|
||||
//BlockInfo holds the necessary information that the cache needs
|
||||
// to sort and store block requests
|
||||
type BlockInfo struct {
|
||||
BlockHash util.Uint256
|
||||
BlockIndex uint32
|
||||
}
|
||||
|
||||
// Equals returns true if two blockInfo objects
|
||||
// have the same hash and the same index
|
||||
func (bi *BlockInfo) Equals(other BlockInfo) bool {
|
||||
return bi.BlockHash.Equals(other.BlockHash) && bi.BlockIndex == other.BlockIndex
|
||||
}
|
||||
|
||||
// indexSorter sorts the blockInfos by blockIndex.
|
||||
type indexSorter []BlockInfo
|
||||
|
||||
func (is indexSorter) Len() int { return len(is) }
|
||||
func (is indexSorter) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
|
||||
func (is indexSorter) Less(i, j int) bool { return is[i].BlockIndex < is[j].BlockIndex }
|
||||
|
||||
//blockCache will cache any pending block requests
|
||||
// for the node when there are no available nodes
|
||||
type blockCache struct {
|
||||
cacheLimit int
|
||||
cacheLock sync.Mutex
|
||||
cache []BlockInfo
|
||||
}
|
||||
|
||||
func newBlockCache(cacheLimit int) *blockCache {
|
||||
return &blockCache{
|
||||
cache: make([]BlockInfo, 0, cacheLimit),
|
||||
cacheLimit: cacheLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *blockCache) addBlockInfo(bi BlockInfo) error {
|
||||
if bc.cacheLen() == bc.cacheLimit {
|
||||
return ErrCacheLimit
|
||||
}
|
||||
|
||||
bc.cacheLock.Lock()
|
||||
defer bc.cacheLock.Unlock()
|
||||
|
||||
// Check for duplicates. The slice will always be small so a simple for loop will work
|
||||
for _, bInfo := range bc.cache {
|
||||
if bInfo.Equals(bi) {
|
||||
return ErrDuplicateItem
|
||||
}
|
||||
}
|
||||
bc.cache = append(bc.cache, bi)
|
||||
|
||||
sort.Sort(indexSorter(bc.cache))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *blockCache) addBlockInfos(bis []BlockInfo) error {
|
||||
|
||||
if len(bis)+bc.cacheLen() > bc.cacheLimit {
|
||||
return errors.New("too many items to add, this will exceed the cache limit")
|
||||
}
|
||||
|
||||
for _, bi := range bis {
|
||||
err := bc.addBlockInfo(bi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *blockCache) cacheLen() int {
|
||||
bc.cacheLock.Lock()
|
||||
defer bc.cacheLock.Unlock()
|
||||
return len(bc.cache)
|
||||
}
|
||||
|
||||
func (bc *blockCache) pickFirstItem() (BlockInfo, error) {
|
||||
return bc.pickItem(0)
|
||||
}
|
||||
|
||||
func (bc *blockCache) pickAllItems() ([]BlockInfo, error) {
|
||||
|
||||
numOfItems := bc.cacheLen()
|
||||
|
||||
items := make([]BlockInfo, 0, numOfItems)
|
||||
|
||||
for i := 0; i < numOfItems; i++ {
|
||||
bi, err := bc.pickFirstItem()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, bi)
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
func (bc *blockCache) pickItem(i uint) (BlockInfo, error) {
|
||||
if bc.cacheLen() < 1 {
|
||||
return BlockInfo{}, ErrNoItems
|
||||
}
|
||||
|
||||
if i >= uint(bc.cacheLen()) {
|
||||
return BlockInfo{}, errors.New("index out of range")
|
||||
}
|
||||
|
||||
bc.cacheLock.Lock()
|
||||
defer bc.cacheLock.Unlock()
|
||||
|
||||
item := bc.cache[i]
|
||||
bc.cache = append(bc.cache[:i], bc.cache[i+1:]...)
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (bc *blockCache) removeHash(hashToRemove util.Uint256) error {
|
||||
index, err := bc.findHash(hashToRemove)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = bc.pickItem(uint(index))
|
||||
return err
|
||||
}
|
||||
|
||||
func (bc *blockCache) findHash(hashToFind util.Uint256) (int, error) {
|
||||
bc.cacheLock.Lock()
|
||||
defer bc.cacheLock.Unlock()
|
||||
for i, bInfo := range bc.cache {
|
||||
if bInfo.BlockHash.Equals(hashToFind) {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return -1, errors.New("hash cannot be found in the cache")
|
||||
}
|
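A short in-package sketch of how the cache above might be driven; the helper name, the limit of 20, and the placeholder BlockInfo values are illustrative only.

// exampleBlockCacheUsage is a hypothetical helper inside package peermgr.
func exampleBlockCacheUsage() error {
	bc := newBlockCache(20)

	bi := BlockInfo{BlockHash: util.Uint256{}, BlockIndex: 10} // placeholder values
	if err := bc.addBlockInfo(bi); err != nil {
		// ErrCacheLimit or ErrDuplicateItem
		return err
	}

	// Items come back out lowest index first, because the cache is kept sorted.
	next, err := bc.pickFirstItem()
	if err != nil {
		// ErrNoItems when the cache is empty
		return err
	}
	_ = next

	return nil
}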
80
_pkg.dev/peermgr/blockcache_test.go
Normal file
80
_pkg.dev/peermgr/blockcache_test.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package peermgr
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAddBlock(t *testing.T) {
|
||||
|
||||
bc := &blockCache{
|
||||
cacheLimit: 20,
|
||||
}
|
||||
bi := randomBlockInfo(t)
|
||||
|
||||
err := bc.addBlockInfo(bi)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
assert.Equal(t, 1, bc.cacheLen())
|
||||
|
||||
err = bc.addBlockInfo(bi)
|
||||
assert.Equal(t, ErrDuplicateItem, err)
|
||||
|
||||
assert.Equal(t, 1, bc.cacheLen())
|
||||
}
|
||||
|
||||
func TestCacheLimit(t *testing.T) {
|
||||
|
||||
bc := &blockCache{
|
||||
cacheLimit: 20,
|
||||
}
|
||||
|
||||
for i := 0; i < bc.cacheLimit; i++ {
|
||||
err := bc.addBlockInfo(randomBlockInfo(t))
|
||||
assert.Equal(t, nil, err)
|
||||
}
|
||||
|
||||
err := bc.addBlockInfo(randomBlockInfo(t))
|
||||
assert.Equal(t, ErrCacheLimit, err)
|
||||
|
||||
assert.Equal(t, bc.cacheLimit, bc.cacheLen())
|
||||
}
|
||||
func TestPickItem(t *testing.T) {
|
||||
|
||||
bc := &blockCache{
|
||||
cacheLimit: 20,
|
||||
}
|
||||
|
||||
for i := 0; i < bc.cacheLimit; i++ {
|
||||
err := bc.addBlockInfo(randomBlockInfo(t))
|
||||
assert.Equal(t, nil, err)
|
||||
}
|
||||
|
||||
for i := 0; i < bc.cacheLimit; i++ {
|
||||
_, err := bc.pickFirstItem()
|
||||
assert.Equal(t, nil, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, 0, bc.cacheLen())
|
||||
}
|
||||
|
||||
func randomUint256(t *testing.T) util.Uint256 {
|
||||
rand32 := make([]byte, 32)
|
||||
rand.Read(rand32)
|
||||
|
||||
u, err := util.Uint256DecodeBytes(rand32)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func randomBlockInfo(t *testing.T) BlockInfo {
|
||||
|
||||
return BlockInfo{
|
||||
randomUint256(t),
|
||||
rand.Uint32(),
|
||||
}
|
||||
}
|
227
_pkg.dev/peermgr/peermgr.go
Normal file
227
_pkg.dev/peermgr/peermgr.go
Normal file
|
@ -0,0 +1,227 @@
|
|||
package peermgr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/command"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// pendingBlockCacheLimit is the maximum amount of pending requests that the cache can hold
|
||||
pendingBlockCacheLimit = 20
|
||||
|
||||
//peerBlockCacheLimit is the maximum amount of inflight blocks that a peer can
|
||||
// have, before they are flagged as busy
|
||||
peerBlockCacheLimit = 1
|
||||
)
|
||||
|
||||
var (
|
||||
//ErrNoAvailablePeers is returned when a request for data from a peer is invoked
|
||||
// but there are no available peers to request data from
|
||||
ErrNoAvailablePeers = errors.New("there are no available peers to interact with")
|
||||
|
||||
// ErrUnknownPeer is returned when a peer that the peer manager does not know about
|
||||
// sends a message to this node
|
||||
ErrUnknownPeer = errors.New("this peer has not been registered with the peer manager")
|
||||
)
|
||||
|
||||
//mPeer represents a peer that is managed by the peer manager
|
||||
type mPeer interface {
|
||||
Disconnect()
|
||||
RequestBlocks([]util.Uint256) error
|
||||
RequestHeaders(util.Uint256) error
|
||||
NotifyDisconnect()
|
||||
}
|
||||
|
||||
type peerstats struct {
|
||||
// when a peer is sent a blockRequest
|
||||
// the peermanager will track this using this blockCache
|
||||
blockCache *blockCache
|
||||
// all other requests will be tracked using the requests map
|
||||
requests map[command.Type]bool
|
||||
}
|
||||
|
||||
//PeerMgr manages all peers that the node is connected to
|
||||
type PeerMgr struct {
|
||||
pLock sync.RWMutex
|
||||
peers map[mPeer]peerstats
|
||||
|
||||
requestCache *blockCache
|
||||
}
|
||||
|
||||
//New returns a new peermgr object
|
||||
func New() *PeerMgr {
|
||||
return &PeerMgr{
|
||||
peers: make(map[mPeer]peerstats),
|
||||
requestCache: newBlockCache(pendingBlockCacheLimit),
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer adds a peer to the list of managed peers
|
||||
func (pmgr *PeerMgr) AddPeer(peer mPeer) {
|
||||
|
||||
pmgr.pLock.Lock()
|
||||
defer pmgr.pLock.Unlock()
|
||||
if _, exists := pmgr.peers[peer]; exists {
|
||||
return
|
||||
}
|
||||
pmgr.peers[peer] = peerstats{
|
||||
requests: make(map[command.Type]bool),
|
||||
blockCache: newBlockCache(peerBlockCacheLimit),
|
||||
}
|
||||
go pmgr.onDisconnect(peer)
|
||||
}
|
||||
|
||||
//MsgReceived notifies the peer manager that we have received a
|
||||
// message from a peer
|
||||
func (pmgr *PeerMgr) MsgReceived(peer mPeer, cmd command.Type) error {
|
||||
pmgr.pLock.Lock()
|
||||
defer pmgr.pLock.Unlock()
|
||||
|
||||
// if peer was unknown then disconnect
|
||||
val, ok := pmgr.peers[peer]
|
||||
if !ok {
|
||||
|
||||
go func() {
|
||||
peer.NotifyDisconnect()
|
||||
}()
|
||||
|
||||
peer.Disconnect()
|
||||
return ErrUnknownPeer
|
||||
}
|
||||
val.requests[cmd] = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//BlockMsgReceived notifies the peer manager that we have received a
|
||||
// block message from a peer
|
||||
func (pmgr *PeerMgr) BlockMsgReceived(peer mPeer, bi BlockInfo) error {
|
||||
|
||||
// if peer was unknown then disconnect
|
||||
val, ok := pmgr.peers[peer]
|
||||
if !ok {
|
||||
|
||||
go func() {
|
||||
peer.NotifyDisconnect()
|
||||
}()
|
||||
|
||||
peer.Disconnect()
|
||||
return ErrUnknownPeer
|
||||
}
|
||||
|
||||
// remove item from the peer's block cache
|
||||
err := val.blockCache.removeHash(bi.BlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if cache empty, if so then return
|
||||
if pmgr.requestCache.cacheLen() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try to clean an item from the pendingBlockCache, a peer has just finished serving a block request
|
||||
cachedBInfo, err := pmgr.requestCache.pickFirstItem()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return pmgr.blockCallPeer(cachedBInfo, func(p mPeer) error {
|
||||
return p.RequestBlocks([]util.Uint256{cachedBInfo.BlockHash})
|
||||
})
|
||||
}
|
||||
|
||||
// Len returns the amount of peers that the peer manager
|
||||
// currently knows about
|
||||
func (pmgr *PeerMgr) Len() int {
|
||||
pmgr.pLock.Lock()
|
||||
defer pmgr.pLock.Unlock()
|
||||
return len(pmgr.peers)
|
||||
}
|
||||
|
||||
// RequestBlock will request a block from the most
|
||||
// available peer. Then update its stats, so we know that
|
||||
// this peer is busy
|
||||
func (pmgr *PeerMgr) RequestBlock(bi BlockInfo) error {
|
||||
pmgr.pLock.Lock()
|
||||
defer pmgr.pLock.Unlock()
|
||||
|
||||
err := pmgr.blockCallPeer(bi, func(p mPeer) error {
|
||||
return p.RequestBlocks([]util.Uint256{bi.BlockHash})
|
||||
})
|
||||
|
||||
if err == ErrNoAvailablePeers {
|
||||
return pmgr.requestCache.addBlockInfo(bi)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// RequestHeaders will request headers from the most available peer.
|
||||
func (pmgr *PeerMgr) RequestHeaders(hash util.Uint256) error {
|
||||
pmgr.pLock.Lock()
|
||||
defer pmgr.pLock.Unlock()
|
||||
return pmgr.callPeerForCmd(command.Headers, func(p mPeer) error {
|
||||
return p.RequestHeaders(hash)
|
||||
})
|
||||
}
|
||||
|
||||
func (pmgr *PeerMgr) callPeerForCmd(cmd command.Type, f func(p mPeer) error) error {
|
||||
for peer, stats := range pmgr.peers {
|
||||
if !stats.requests[cmd] {
|
||||
stats.requests[cmd] = true
|
||||
return f(peer)
|
||||
}
|
||||
}
|
||||
return ErrNoAvailablePeers
|
||||
}
|
||||
|
||||
func (pmgr *PeerMgr) blockCallPeer(bi BlockInfo, f func(p mPeer) error) error {
|
||||
for peer, stats := range pmgr.peers {
|
||||
if stats.blockCache.cacheLen() < peerBlockCacheLimit {
|
||||
err := stats.blockCache.addBlockInfo(bi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f(peer)
|
||||
}
|
||||
}
|
||||
return ErrNoAvailablePeers
|
||||
}
|
||||
|
||||
func (pmgr *PeerMgr) onDisconnect(p mPeer) {
|
||||
|
||||
// Blocking until peer is disconnected
|
||||
p.NotifyDisconnect()
|
||||
|
||||
pmgr.pLock.Lock()
|
||||
defer func() {
|
||||
delete(pmgr.peers, p)
|
||||
pmgr.pLock.Unlock()
|
||||
}()
|
||||
|
||||
// Add all of peers outstanding block requests into
|
||||
// the peer managers pendingBlockRequestCache
|
||||
|
||||
val, ok := pmgr.peers[p]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
pendingRequests, err := val.blockCache.pickAllItems()
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = pmgr.requestCache.addBlockInfos(pendingRequests)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
}
|
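A sketch of the request/receive cycle the manager above expects, written as a hypothetical in-package helper; any value implementing the unexported mPeer interface can be passed in, as the mock peer in the test file below demonstrates.

// exampleRequestCycle is a hypothetical helper inside package peermgr.
func exampleRequestCycle(p mPeer) error {
	pmgr := New()
	pmgr.AddPeer(p)

	bi := BlockInfo{BlockHash: util.Uint256{}, BlockIndex: 1} // placeholder values

	// Ask a free peer for the block; if every peer is busy the request is
	// parked in the pending request cache instead of returning an error.
	if err := pmgr.RequestBlock(bi); err != nil {
		return err
	}

	// When the block arrives, free the peer and replay a cached request, if any.
	return pmgr.BlockMsgReceived(p, bi)
}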
201
_pkg.dev/peermgr/peermgr_test.go
Normal file
201
_pkg.dev/peermgr/peermgr_test.go
Normal file
|
@ -0,0 +1,201 @@
|
|||
package peermgr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/command"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type peer struct {
|
||||
quit chan bool
|
||||
nonce int
|
||||
disconnected bool
|
||||
blockRequested int
|
||||
headersRequested int
|
||||
}
|
||||
|
||||
func (p *peer) Disconnect() {
|
||||
p.disconnected = true
|
||||
p.quit <- true
|
||||
}
|
||||
func (p *peer) RequestBlocks([]util.Uint256) error {
|
||||
p.blockRequested++
|
||||
return nil
|
||||
}
|
||||
func (p *peer) RequestHeaders(util.Uint256) error {
|
||||
p.headersRequested++
|
||||
return nil
|
||||
}
|
||||
func (p *peer) NotifyDisconnect() {
|
||||
<-p.quit
|
||||
}
|
||||
|
||||
func TestAddPeer(t *testing.T) {
|
||||
pmgr := New()
|
||||
|
||||
peerA := &peer{nonce: 1}
|
||||
peerB := &peer{nonce: 2}
|
||||
peerC := &peer{nonce: 3}
|
||||
|
||||
pmgr.AddPeer(peerA)
|
||||
pmgr.AddPeer(peerB)
|
||||
pmgr.AddPeer(peerC)
|
||||
pmgr.AddPeer(peerC)
|
||||
|
||||
assert.Equal(t, 3, pmgr.Len())
|
||||
}
|
||||
|
||||
func TestRequestBlocks(t *testing.T) {
|
||||
pmgr := New()
|
||||
|
||||
peerA := &peer{nonce: 1}
|
||||
peerB := &peer{nonce: 2}
|
||||
peerC := &peer{nonce: 3}
|
||||
|
||||
pmgr.AddPeer(peerA)
|
||||
pmgr.AddPeer(peerB)
|
||||
pmgr.AddPeer(peerC)
|
||||
|
||||
firstBlock := randomBlockInfo(t)
|
||||
err := pmgr.RequestBlock(firstBlock)
|
||||
assert.Nil(t, err)
|
||||
|
||||
secondBlock := randomBlockInfo(t)
|
||||
err = pmgr.RequestBlock(secondBlock)
|
||||
assert.Nil(t, err)
|
||||
|
||||
thirdBlock := randomBlockInfo(t)
|
||||
err = pmgr.RequestBlock(thirdBlock)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Since the peer manager did not get a MsgReceived
|
||||
// in between the block requests
|
||||
// a request should be sent to all peers
|
||||
// This is only true, if peerBlockCacheLimit == 1
|
||||
|
||||
assert.Equal(t, 1, peerA.blockRequested)
|
||||
assert.Equal(t, 1, peerB.blockRequested)
|
||||
assert.Equal(t, 1, peerC.blockRequested)
|
||||
|
||||
// Since the peer manager still has not received a MsgReceived
|
||||
// another call to request blocks, will add the request to the cache
|
||||
// and return a nil err
|
||||
|
||||
fourthBlock := randomBlockInfo(t)
|
||||
err = pmgr.RequestBlock(fourthBlock)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, 1, pmgr.requestCache.cacheLen())
|
||||
|
||||
// If we tell the peer manager that we have received a block
|
||||
// it will check the cache for any pending requests and send a block request if there are any.
|
||||
// The request will go to the peer who sent back the block corresponding to the first hash
|
||||
// since the other two peers are still busy with their block requests
|
||||
|
||||
peer := findPeerwithHash(t, pmgr, firstBlock.BlockHash)
|
||||
err = pmgr.BlockMsgReceived(peer, firstBlock)
|
||||
assert.Nil(t, err)
|
||||
|
||||
totalRequests := peerA.blockRequested + peerB.blockRequested + peerC.blockRequested
|
||||
assert.Equal(t, 4, totalRequests)
|
||||
|
||||
// cache should be empty now
|
||||
assert.Equal(t, 0, pmgr.requestCache.cacheLen())
|
||||
}
|
||||
|
||||
// The peer manager does not tell you what peer was sent a particular block request
|
||||
// For testing purposes, the following function will find that peer
|
||||
func findPeerwithHash(t *testing.T, pmgr *PeerMgr, blockHash util.Uint256) mPeer {
|
||||
for peer, stats := range pmgr.peers {
|
||||
_, err := stats.blockCache.findHash(blockHash)
|
||||
if err == nil {
|
||||
return peer
|
||||
}
|
||||
}
|
||||
assert.Fail(t, "cannot find a peer with that hash")
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestRequestHeaders(t *testing.T) {
|
||||
pmgr := New()
|
||||
|
||||
peerA := &peer{nonce: 1}
|
||||
peerB := &peer{nonce: 2}
|
||||
peerC := &peer{nonce: 3}
|
||||
|
||||
pmgr.AddPeer(peerA)
|
||||
pmgr.AddPeer(peerB)
|
||||
pmgr.AddPeer(peerC)
|
||||
|
||||
err := pmgr.RequestHeaders(util.Uint256{})
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = pmgr.RequestHeaders(util.Uint256{})
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = pmgr.RequestHeaders(util.Uint256{})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Since the peer manager did not get a MsgReceived
|
||||
// in between the header requests
|
||||
// a request should be sent to all peers
|
||||
|
||||
assert.Equal(t, 1, peerA.headersRequested)
|
||||
assert.Equal(t, 1, peerB.headersRequested)
|
||||
assert.Equal(t, 1, peerC.headersRequested)
|
||||
|
||||
// Since the peer manager still has not received a MsgReceived
|
||||
// another call to request header, will return a NoAvailablePeerError
|
||||
|
||||
err = pmgr.RequestHeaders(util.Uint256{})
|
||||
assert.Equal(t, ErrNoAvailablePeers, err)
|
||||
|
||||
// If we tell the peer manager that peerA has given us headers
|
||||
// then send another header request. It will go to peerA
|
||||
// since the other two peers are still busy with their
|
||||
// header requests
|
||||
|
||||
err = pmgr.MsgReceived(peerA, command.Headers)
|
||||
assert.Nil(t, err)
|
||||
err = pmgr.RequestHeaders(util.Uint256{})
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 2, peerA.headersRequested)
|
||||
assert.Equal(t, 1, peerB.headersRequested)
|
||||
assert.Equal(t, 1, peerC.headersRequested)
|
||||
}
|
||||
|
||||
func TestUnknownPeer(t *testing.T) {
|
||||
pmgr := New()
|
||||
|
||||
unknownPeer := &peer{
|
||||
disconnected: false,
|
||||
quit: make(chan bool),
|
||||
}
|
||||
|
||||
err := pmgr.MsgReceived(unknownPeer, command.Headers)
|
||||
assert.Equal(t, true, unknownPeer.disconnected)
|
||||
assert.Equal(t, ErrUnknownPeer, err)
|
||||
}
|
||||
|
||||
func TestNotifyDisconnect(t *testing.T) {
|
||||
pmgr := New()
|
||||
|
||||
peerA := &peer{
|
||||
nonce: 1,
|
||||
quit: make(chan bool),
|
||||
}
|
||||
|
||||
pmgr.AddPeer(peerA)
|
||||
|
||||
if pmgr.Len() != 1 {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
peerA.Disconnect()
|
||||
|
||||
if pmgr.Len() != 0 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
7
_pkg.dev/server/addrmgr.go
Normal file
7
_pkg.dev/server/addrmgr.go
Normal file
|
@ -0,0 +1,7 @@
|
|||
package server
|
||||
|
||||
// getAddress will return a viable address to connect to
|
||||
// Currently it is hardcoded to be one neo node until address manager is implemented
|
||||
func (s *Server) getAddress() (string, error) {
|
||||
return "seed1.ngd.network:10333", nil
|
||||
}
|
15
_pkg.dev/server/chain.go
Normal file
15
_pkg.dev/server/chain.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
func setupChain(db database.Database, net protocol.Magic) (*chain.Chain, error) {
|
||||
chain, err := chain.New(db, net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return chain, nil
|
||||
}
|
47
_pkg.dev/server/connmgr.go
Normal file
47
_pkg.dev/server/connmgr.go
Normal file
|
@ -0,0 +1,47 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/connmgr"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peer"
|
||||
iputils "github.com/CityOfZion/neo-go/pkg/wire/util/ip"
|
||||
)
|
||||
|
||||
func setupConnManager(s *Server, port uint16) (*connmgr.Connmgr, error) {
|
||||
cfg := connmgr.Config{
|
||||
GetAddress: s.getAddress,
|
||||
OnAccept: s.onAccept,
|
||||
OnConnection: s.onConnection,
|
||||
AddressPort: iputils.GetLocalIP().String() + ":" + strconv.FormatUint(uint64(port), 10),
|
||||
}
|
||||
return connmgr.New(cfg)
|
||||
}
|
||||
|
||||
func (s *Server) onConnection(conn net.Conn, addr string) {
|
||||
fmt.Println("We have connected successfully to: ", addr)
|
||||
|
||||
p := peer.NewPeer(conn, false, *s.peerCfg)
|
||||
err := p.Run()
|
||||
if err != nil {
|
||||
fmt.Println("Error running peer" + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.pmg.AddPeer(p)
|
||||
}
|
||||
|
||||
func (s *Server) onAccept(conn net.Conn) {
|
||||
fmt.Println("A peer with address: ", conn.RemoteAddr().String(), "has connect to us")
|
||||
|
||||
p := peer.NewPeer(conn, true, *s.peerCfg)
|
||||
err := p.Run()
|
||||
if err != nil {
|
||||
fmt.Println("Error running peer" + err.Error())
|
||||
return
|
||||
}
|
||||
s.pmg.AddPeer(p)
|
||||
}
|
14
_pkg.dev/server/database.go
Normal file
14
_pkg.dev/server/database.go
Normal file
|
@ -0,0 +1,14 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
func setupDatabase(net protocol.Magic) (database.Database, error) {
|
||||
db, err := database.New(net.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
23
_pkg.dev/server/peerconfig.go
Normal file
23
_pkg.dev/server/peerconfig.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peer"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
func setupPeerConfig(s *Server, port uint16, net protocol.Magic) *peer.LocalConfig {
|
||||
return &peer.LocalConfig{
|
||||
Net: net,
|
||||
UserAgent: "NEO-GO",
|
||||
Services: protocol.NodePeerService,
|
||||
Nonce: rand.Uint32(),
|
||||
ProtocolVer: 0,
|
||||
Relay: false,
|
||||
Port: port,
|
||||
StartHeight: s.chain.CurrentHeight,
|
||||
OnHeader: s.onHeader,
|
||||
OnBlock: s.onBlock,
|
||||
}
|
||||
}
|
9
_pkg.dev/server/peermgr.go
Normal file
9
_pkg.dev/server/peermgr.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/peermgr"
|
||||
)
|
||||
|
||||
func setupPeerManager() *peermgr.PeerMgr {
|
||||
return peermgr.New()
|
||||
}
|
120
_pkg.dev/server/server.go
Normal file
120
_pkg.dev/server/server.go
Normal file
|
@ -0,0 +1,120 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peermgr"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
"github.com/CityOfZion/neo-go/pkg/connmgr"
|
||||
"github.com/CityOfZion/neo-go/pkg/peer"
|
||||
"github.com/CityOfZion/neo-go/pkg/syncmgr"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/database"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
|
||||
)
|
||||
|
||||
// Server orchestrates all of the modules
|
||||
type Server struct {
|
||||
net protocol.Magic
|
||||
stopCh chan error
|
||||
|
||||
// Modules
|
||||
db database.Database
|
||||
smg *syncmgr.Syncmgr
|
||||
cmg *connmgr.Connmgr
|
||||
pmg *peermgr.PeerMgr
|
||||
chain *chain.Chain
|
||||
|
||||
peerCfg *peer.LocalConfig
|
||||
}
|
||||
|
||||
//New creates a new server object for a particular network and sets up each module
|
||||
func New(net protocol.Magic, port uint16) (*Server, error) {
|
||||
s := &Server{
|
||||
net: net,
|
||||
stopCh: make(chan error, 0),
|
||||
}
|
||||
|
||||
// Setup database
|
||||
db, err := setupDatabase(net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.db = db
|
||||
|
||||
// setup peermgr
|
||||
peermgr := setupPeerManager()
|
||||
s.pmg = peermgr
|
||||
|
||||
// Setup chain
|
||||
chain, err := setupChain(db, net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.chain = chain
|
||||
|
||||
// Setup sync manager
|
||||
syncmgr, err := setupSyncManager(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.smg = syncmgr
|
||||
|
||||
// Setup connection manager
|
||||
connmgr, err := setupConnManager(s, port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.cmg = connmgr
|
||||
|
||||
// Setup peer config
|
||||
peerCfg := setupPeerConfig(s, port, net)
|
||||
s.peerCfg = peerCfg
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Run starts the daemon by connecting to previously known nodes or connecting to seed nodes.
|
||||
// This should be called once all modules have been setup
|
||||
func (s *Server) Run() error {
|
||||
fmt.Println("Server is starting up")
|
||||
|
||||
// start the connmgr
|
||||
err := s.cmg.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Attempt to connect to a peer
|
||||
err = s.cmg.NewRequest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Request headers to start synchronisation
|
||||
bestHeader, err := s.chain.Db.GetLastHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.pmg.RequestHeaders(bestHeader.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("Server Successfully started")
|
||||
return s.wait()
|
||||
}
|
||||
|
||||
func (s *Server) wait() error {
|
||||
err := <-s.stopCh
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop stops the server
|
||||
func (s *Server) Stop(err error) error {
|
||||
fmt.Println("Server is shutting down")
|
||||
s.stopCh <- err
|
||||
return nil
|
||||
}
|
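A sketch of how the server above could be assembled and run from a main package. The import paths mirror the ones used in this file; the mainnet magic value and the assumption that protocol.Magic converts from an integer literal are not confirmed by this change.

package main

import (
	"fmt"

	"github.com/CityOfZion/neo-go/pkg/server" // assumed import path
	"github.com/CityOfZion/neo-go/pkg/wire/protocol"
)

func main() {
	// 7630401 is the NEO mainnet magic number (assumption).
	s, err := server.New(protocol.Magic(7630401), 10333)
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}

	// Run blocks until Stop is called with an error (or nil).
	if err := s.Run(); err != nil {
		fmt.Println("server stopped with error:", err)
	}
}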
110
_pkg.dev/server/syncmgr.go
Normal file
110
_pkg.dev/server/syncmgr.go
Normal file
|
@ -0,0 +1,110 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peermgr"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/peer"
|
||||
"github.com/CityOfZion/neo-go/pkg/syncmgr"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
func setupSyncManager(s *Server) (*syncmgr.Syncmgr, error) {
|
||||
|
||||
cfg := &syncmgr.Config{
|
||||
ProcessBlock: s.processBlock,
|
||||
ProcessHeaders: s.processHeaders,
|
||||
|
||||
RequestBlock: s.requestBlock,
|
||||
RequestHeaders: s.requestHeaders,
|
||||
|
||||
GetNextBlockHash: s.getNextBlockHash,
|
||||
AskForNewBlocks: s.askForNewBlocks,
|
||||
|
||||
FetchHeadersAgain: s.fetchHeadersAgain,
|
||||
FetchBlockAgain: s.fetchBlockAgain,
|
||||
}
|
||||
|
||||
// Add nextBlockIndex in syncmgr
|
||||
lastBlock, err := s.chain.Db.GetLastBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nextBlockIndex := lastBlock.Index + 1
|
||||
|
||||
return syncmgr.New(cfg, nextBlockIndex), nil
|
||||
}
|
||||
|
||||
func (s *Server) onHeader(peer *peer.Peer, hdrsMessage *payload.HeadersMessage) {
|
||||
s.pmg.MsgReceived(peer, hdrsMessage.Command())
|
||||
s.smg.OnHeader(peer, hdrsMessage)
|
||||
}
|
||||
|
||||
func (s *Server) onBlock(peer *peer.Peer, blockMsg *payload.BlockMessage) {
|
||||
s.pmg.BlockMsgReceived(peer, peermgr.BlockInfo{
|
||||
BlockHash: blockMsg.Hash,
|
||||
BlockIndex: blockMsg.Index,
|
||||
})
|
||||
s.smg.OnBlock(peer, blockMsg)
|
||||
}
|
||||
|
||||
func (s *Server) processBlock(block payload.Block) error {
|
||||
return s.chain.ProcessBlock(block)
|
||||
}
|
||||
|
||||
func (s *Server) processHeaders(hdrs []*payload.BlockBase) error {
|
||||
return s.chain.ProcessHeaders(hdrs)
|
||||
}
|
||||
|
||||
func (s *Server) requestHeaders(hash util.Uint256) error {
|
||||
return s.pmg.RequestHeaders(hash)
|
||||
}
|
||||
|
||||
func (s *Server) requestBlock(hash util.Uint256, index uint32) error {
|
||||
return s.pmg.RequestBlock(peermgr.BlockInfo{
|
||||
BlockHash: hash,
|
||||
BlockIndex: index,
|
||||
})
|
||||
}
|
||||
|
||||
// getNextBlockHash searches the database for the blockHash
|
||||
// that is one height above our best block. The hash will be taken from a header.
|
||||
func (s *Server) getNextBlockHash() (util.Uint256, error) {
|
||||
bestBlock, err := s.chain.Db.GetLastBlock()
|
||||
if err != nil {
|
||||
// Panic!
|
||||
// XXX: One alternative is to get the network, erase the database and then start again from scratch.
|
||||
// This should never happen. The latest block will always be at least the genesis block
|
||||
panic("could not get best block from database" + err.Error())
|
||||
}
|
||||
|
||||
index := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(index, bestBlock.Index+1)
|
||||
|
||||
hdr, err := s.chain.Db.GetHeaderFromHeight(index)
|
||||
if err != nil {
|
||||
return util.Uint256{}, err
|
||||
}
|
||||
return hdr.Hash, nil
|
||||
}
|
||||
|
||||
func (s *Server) getBestBlockHash() (util.Uint256, error) {
|
||||
return util.Uint256{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) askForNewBlocks() {
|
||||
// send a getblocks message with the latest block saved
|
||||
|
||||
// when we receive something then send get data
|
||||
}
|
||||
|
||||
func (s *Server) fetchHeadersAgain(util.Uint256) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) fetchBlockAgain(util.Uint256) error {
|
||||
return nil
|
||||
}
|
61
_pkg.dev/syncmgr/blockmode.go
Normal file
61
_pkg.dev/syncmgr/blockmode.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
// blockModeOnBlock is called when the sync manager is in block mode
|
||||
// and receives a block.
|
||||
func (s *Syncmgr) blockModeOnBlock(peer SyncPeer, block payload.Block) error {
|
||||
|
||||
// Check if it is a future block
|
||||
// XXX: since we are storing blocks in memory, we do not want to store blocks
|
||||
// that are too far ahead of the tip
|
||||
if block.Index > s.nextBlockIndex+2000 {
|
||||
return nil
|
||||
}
|
||||
if block.Index > s.nextBlockIndex {
|
||||
s.addToBlockPool(block)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Process Block
|
||||
err := s.processBlock(block)
|
||||
if err != nil && err != chain.ErrBlockAlreadyExists {
|
||||
return s.cfg.FetchBlockAgain(block.Hash)
|
||||
}
|
||||
|
||||
// Check the block pool
|
||||
err = s.checkPool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if the received block hash equals the header hash from the last getheaders request this node performed
|
||||
// if not then increment and request next block
|
||||
if s.headerHash != block.Hash {
|
||||
nextHash, err := s.cfg.GetNextBlockHash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.cfg.RequestBlock(nextHash, block.Index)
|
||||
}
|
||||
|
||||
// If we are caught up then go into normal mode
|
||||
diff := peer.Height() - block.Index
|
||||
if diff <= cruiseHeight {
|
||||
s.syncmode = normalMode
|
||||
s.timer.Reset(blockTimer)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If not then we go back into headersMode and request more headers.
|
||||
s.syncmode = headersMode
|
||||
return s.cfg.RequestHeaders(block.Hash)
|
||||
}
|
||||
|
||||
func (s *Syncmgr) blockModeOnHeaders(peer SyncPeer, hdrs []*payload.BlockBase) error {
|
||||
// We ignore headers when in this mode
|
||||
return nil
|
||||
}
|
57
_pkg.dev/syncmgr/blockpool.go
Normal file
57
_pkg.dev/syncmgr/blockpool.go
Normal file
|
@ -0,0 +1,57 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
func (s *Syncmgr) addToBlockPool(newBlock payload.Block) {
|
||||
s.poolLock.Lock()
|
||||
defer s.poolLock.Unlock()
|
||||
|
||||
for _, block := range s.blockPool {
|
||||
if block.Index == newBlock.Index {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.blockPool = append(s.blockPool, newBlock)
|
||||
|
||||
// sort slice using block index
|
||||
sort.Slice(s.blockPool, func(i, j int) bool {
|
||||
return s.blockPool[i].Index < s.blockPool[j].Index
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (s *Syncmgr) checkPool() error {
|
||||
// Assuming that the blocks are sorted in order
|
||||
|
||||
var indexesToRemove = -1
|
||||
|
||||
s.poolLock.Lock()
|
||||
defer func() {
|
||||
// removes all elements before this index, including the element at this index
|
||||
s.blockPool = s.blockPool[indexesToRemove+1:]
|
||||
s.poolLock.Unlock()
|
||||
}()
|
||||
|
||||
// loop iterates through the cache, processing any
|
||||
// blocks that can be added to the chain
|
||||
for i, block := range s.blockPool {
|
||||
if s.nextBlockIndex != block.Index {
|
||||
break
|
||||
}
|
||||
|
||||
// Process this block and save the index location so we can remove it in the deferred call
|
||||
err := s.processBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
indexesToRemove = i
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
42
_pkg.dev/syncmgr/blockpool_test.go
Normal file
42
_pkg.dev/syncmgr/blockpool_test.go
Normal file
|
@ -0,0 +1,42 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAddBlockPoolFlush(t *testing.T) {
|
||||
syncmgr, _ := setupSyncMgr(blockMode, 10)
|
||||
|
||||
blockMessage := randomBlockMessage(t, 11)
|
||||
|
||||
peer := &mockPeer{
|
||||
height: 100,
|
||||
}
|
||||
|
||||
// Since the block has Index 11 and the sync manager needs the block with index 10
|
||||
// This block will be added to the blockPool
|
||||
err := syncmgr.OnBlock(peer, blockMessage)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(syncmgr.blockPool))
|
||||
|
||||
// The sync manager is still looking for the block at height 10
|
||||
// Since this block is at height 12, it will be added to the block pool
|
||||
blockMessage = randomBlockMessage(t, 12)
|
||||
err = syncmgr.OnBlock(peer, blockMessage)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(syncmgr.blockPool))
|
||||
|
||||
// This is the block that the sync manager was waiting for
|
||||
// It should process this block, then check the pool for the next set of blocks
|
||||
blockMessage = randomBlockMessage(t, 10)
|
||||
err = syncmgr.OnBlock(peer, blockMessage)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(syncmgr.blockPool))
|
||||
|
||||
// Since we processed 3 blocks and the sync manager started
|
||||
// looking for the block with index 10, the sync manager should now be looking for
|
||||
// the block with index 13
|
||||
assert.Equal(t, uint32(13), syncmgr.nextBlockIndex)
|
||||
}
|
44
_pkg.dev/syncmgr/config.go
Normal file
44
_pkg.dev/syncmgr/config.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
// Config is the configuration for the sync manager
|
||||
type Config struct {
|
||||
|
||||
// Chain functions
|
||||
ProcessBlock func(block payload.Block) error
|
||||
ProcessHeaders func(hdrs []*payload.BlockBase) error
|
||||
|
||||
// RequestHeaders will send a getHeaders request
|
||||
// with the hash passed in as a parameter
|
||||
RequestHeaders func(hash util.Uint256) error
|
||||
|
||||
//RequestBlock will send a getdata request for the block
|
||||
// with the hash passed as a parameter
|
||||
RequestBlock func(hash util.Uint256, index uint32) error
|
||||
|
||||
// GetNextBlockHash returns the block hash of the header in front of the block
|
||||
// at the tip of this node's chain. This assumes that the node is not in sync
|
||||
GetNextBlockHash func() (util.Uint256, error)
|
||||
|
||||
// AskForNewBlocks will send out a message to the network
|
||||
// asking for new blocks
|
||||
AskForNewBlocks func()
|
||||
|
||||
// FetchHeadersAgain is called when a peer has provided headers that did not
|
||||
// validate properly. We pass in the hash of the first header
|
||||
FetchHeadersAgain func(util.Uint256) error
|
||||
|
||||
// FetchBlockAgain is called when a peer has provided a block that did not
|
||||
// validate properly. We pass in the hash of the block
|
||||
FetchBlockAgain func(util.Uint256) error
|
||||
}
|
||||
|
||||
// SyncPeer represents a peer on the network
|
||||
// that this node can sync with
|
||||
type SyncPeer interface {
|
||||
Height() uint32
|
||||
}
|
42
_pkg.dev/syncmgr/headermode.go
Normal file
42
_pkg.dev/syncmgr/headermode.go
Normal file
|
@ -0,0 +1,42 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
// headersModeOnHeaders is called when the sync manager is in headers mode
|
||||
// and receives a header.
|
||||
func (s *Syncmgr) headersModeOnHeaders(peer SyncPeer, hdrs []*payload.BlockBase) error {
|
||||
// If we are in Headers mode, then we just need to process the headers
|
||||
// Note: For the un-optimised version, we move straight to blocksOnly mode
|
||||
|
||||
firstHash := hdrs[0].Hash
|
||||
firstHdrIndex := hdrs[0].Index
|
||||
|
||||
err := s.cfg.ProcessHeaders(hdrs)
|
||||
if err == nil {
|
||||
// Update syncmgr last header
|
||||
s.headerHash = hdrs[len(hdrs)-1].Hash
|
||||
|
||||
s.syncmode = blockMode
|
||||
return s.cfg.RequestBlock(firstHash, firstHdrIndex)
|
||||
}
|
||||
|
||||
// Check whether it is a validation error, or a database error
|
||||
if _, ok := err.(*chain.ValidationError); ok {
|
||||
// If we get a validation error we re-request the headers
|
||||
// the method will automatically fetch from a different peer
|
||||
// XXX: Add increment banScore for this peer
|
||||
return s.cfg.FetchHeadersAgain(firstHash)
|
||||
}
|
||||
// This means it is a database error. We have no way to recover from this.
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
// headersModeOnBlock is called when the sync manager is in headers mode
|
||||
// and receives a block.
|
||||
func (s *Syncmgr) headersModeOnBlock(peer SyncPeer, block payload.Block) error {
|
||||
// While in headers mode, ignore any blocks received
|
||||
return nil
|
||||
}
|
113
_pkg.dev/syncmgr/mockhelpers_test.go
Normal file
113
_pkg.dev/syncmgr/mockhelpers_test.go
Normal file
|
@ -0,0 +1,113 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
type syncTestHelper struct {
|
||||
blocksProcessed int
|
||||
headersProcessed int
|
||||
newBlockRequest int
|
||||
headersFetchRequest int
|
||||
blockFetchRequest int
|
||||
err error
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) ProcessBlock(msg payload.Block) error {
|
||||
s.blocksProcessed++
|
||||
return s.err
|
||||
}
|
||||
func (s *syncTestHelper) ProcessHeaders(hdrs []*payload.BlockBase) error {
|
||||
s.headersProcessed = s.headersProcessed + len(hdrs)
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) GetNextBlockHash() (util.Uint256, error) {
|
||||
return util.Uint256{}, s.err
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) AskForNewBlocks() {
|
||||
s.newBlockRequest++
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) FetchHeadersAgain(util.Uint256) error {
|
||||
s.headersFetchRequest++
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) FetchBlockAgain(util.Uint256) error {
|
||||
s.blockFetchRequest++
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) RequestBlock(util.Uint256, uint32) error {
|
||||
s.blockFetchRequest++
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *syncTestHelper) RequestHeaders(util.Uint256) error {
|
||||
s.headersFetchRequest++
|
||||
return s.err
|
||||
}
|
||||
|
||||
type mockPeer struct {
|
||||
height uint32
|
||||
}
|
||||
|
||||
func (p *mockPeer) Height() uint32 { return p.height }
|
||||
|
||||
func randomHeadersMessage(t *testing.T, num int) *payload.HeadersMessage {
|
||||
var hdrs []*payload.BlockBase
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
hash := randomUint256(t)
|
||||
hdr := &payload.BlockBase{Hash: hash}
|
||||
hdrs = append(hdrs, hdr)
|
||||
}
|
||||
|
||||
hdrsMsg, err := payload.NewHeadersMessage()
|
||||
assert.Nil(t, err)
|
||||
|
||||
hdrsMsg.Headers = hdrs
|
||||
|
||||
return hdrsMsg
|
||||
}
|
||||
|
||||
func randomUint256(t *testing.T) util.Uint256 {
|
||||
hash := make([]byte, 32)
|
||||
_, err := rand.Read(hash)
|
||||
assert.Nil(t, err)
|
||||
|
||||
u, err := util.Uint256DecodeBytes(hash)
|
||||
assert.Nil(t, err)
|
||||
return u
|
||||
}
|
||||
|
||||
func setupSyncMgr(mode mode, nextBlockIndex uint32) (*Syncmgr, *syncTestHelper) {
|
||||
helper := &syncTestHelper{}
|
||||
|
||||
cfg := &Config{
|
||||
ProcessBlock: helper.ProcessBlock,
|
||||
ProcessHeaders: helper.ProcessHeaders,
|
||||
|
||||
GetNextBlockHash: helper.GetNextBlockHash,
|
||||
AskForNewBlocks: helper.AskForNewBlocks,
|
||||
|
||||
FetchHeadersAgain: helper.FetchHeadersAgain,
|
||||
FetchBlockAgain: helper.FetchBlockAgain,
|
||||
|
||||
RequestBlock: helper.RequestBlock,
|
||||
RequestHeaders: helper.RequestHeaders,
|
||||
}
|
||||
|
||||
syncmgr := New(cfg, nextBlockIndex)
|
||||
syncmgr.syncmode = mode
|
||||
|
||||
return syncmgr, helper
|
||||
}
|
60
_pkg.dev/syncmgr/normalmode.go
Normal file
60
_pkg.dev/syncmgr/normalmode.go
Normal file
|
@ -0,0 +1,60 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
)
|
||||
|
||||
func (s *Syncmgr) normalModeOnHeaders(peer SyncPeer, hdrs []*payload.BlockBase) error {
|
||||
// If in normal mode, first process the headers
|
||||
err := s.cfg.ProcessHeaders(hdrs)
|
||||
if err != nil {
|
||||
// If something went wrong with processing the headers
|
||||
// Ask another peer for the headers.
|
||||
//XXX: Increment banscore for this peer
|
||||
return s.cfg.FetchHeadersAgain(hdrs[0].Hash)
|
||||
}
|
||||
|
||||
lenHeaders := len(hdrs)
|
||||
firstHash := hdrs[0].Hash
|
||||
firstHdrIndex := hdrs[0].Index
|
||||
lastHash := hdrs[lenHeaders-1].Hash
|
||||
|
||||
// Update syncmgr latest header
|
||||
s.headerHash = lastHash
|
||||
|
||||
// If there are 2k headers, then ask for more headers and switch back to headers mode.
|
||||
if lenHeaders == 2000 {
|
||||
s.syncmode = headersMode
|
||||
return s.cfg.RequestHeaders(lastHash)
|
||||
}
|
||||
|
||||
// Ask for the corresponding block iff there are < 2k headers
|
||||
// then switch to blocksMode
|
||||
// Bounds state that len > 1 && len!= 2000 & maxHeadersInMessage == 2000
|
||||
// This means that we have less than 2k headers
|
||||
s.syncmode = blockMode
|
||||
return s.cfg.RequestBlock(firstHash, firstHdrIndex)
|
||||
}
|
||||
|
||||
// normalModeOnBlock is called when the sync manager is in normal mode
|
||||
// and receives a block.
|
||||
func (s *Syncmgr) normalModeOnBlock(peer SyncPeer, block payload.Block) error {
|
||||
// stop the timer that periodically asks for blocks
|
||||
s.timer.Stop()
|
||||
|
||||
// process block
|
||||
err := s.processBlock(block)
|
||||
if err != nil {
|
||||
s.timer.Reset(blockTimer)
|
||||
return s.cfg.FetchBlockAgain(block.Hash)
|
||||
}
|
||||
|
||||
diff := peer.Height() - block.Index
|
||||
if diff > trailingHeight {
|
||||
s.syncmode = headersMode
|
||||
return s.cfg.RequestHeaders(block.Hash)
|
||||
}
|
||||
|
||||
s.timer.Reset(blockTimer)
|
||||
return nil
|
||||
}
|
152
_pkg.dev/syncmgr/syncmgr.go
Normal file
152
_pkg.dev/syncmgr/syncmgr.go
Normal file
|
@ -0,0 +1,152 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
type mode uint8
|
||||
|
||||
// Note: this is the unoptimised version without parallel sync
|
||||
// The algorithm for the unoptimised version is simple:
|
||||
// Download 2000 headers, then download the blocks for those headers
|
||||
// Once those blocks are downloaded, we repeat the process again
|
||||
// Until we are no more than one block behind the tip.
|
||||
// Once this happens, we switch into normal mode.
|
||||
//In normal mode, we have a timer on for X seconds and ask nodes for blocks and also to double-check
|
||||
// if we are behind once the timer runs out.
|
||||
// The timer restarts whenever we receive a block.
|
||||
// The parameter X should be approximately the time it takes the network to reach consensus
|
||||
|
||||
//blockTimer approximates to how long it takes to reach consensus and propagate
|
||||
// a block in the network. Once a node has synchronised with the network, it will
|
||||
// ask the network for a new block every blockTimer
|
||||
const blockTimer = 20 * time.Second
|
||||
|
||||
// trailingHeight indicates how many blocks the node has to be behind by
|
||||
// before it switches to headersMode.
|
||||
const trailingHeight = 100
|
||||
|
||||
// cruiseHeight indicates how many blocks the node has to be behind by
|
||||
// before it switches to normalMode and fetches blocks every X seconds.
|
||||
const cruiseHeight = 0
|
||||
|
||||
const (
|
||||
headersMode mode = 1
|
||||
blockMode mode = 2
|
||||
normalMode mode = 3
|
||||
)
|
||||
|
||||
//Syncmgr keeps the node in sync with the rest of the network
|
||||
type Syncmgr struct {
|
||||
syncmode mode
|
||||
cfg *Config
|
||||
timer *time.Timer
|
||||
|
||||
// headerHash is the hash of the last header in the last OnHeaders message that we received.
|
||||
// When receiving blocks, we can use this to determine whether the node has downloaded
|
||||
// all of the blocks for the last headers messages
|
||||
headerHash util.Uint256
|
||||
|
||||
poolLock sync.Mutex
|
||||
blockPool []payload.Block
|
||||
nextBlockIndex uint32
|
||||
}
|
||||
|
||||
// New creates a new sync manager
|
||||
func New(cfg *Config, nextBlockIndex uint32) *Syncmgr {
|
||||
|
||||
newBlockTimer := time.AfterFunc(blockTimer, func() {
|
||||
cfg.AskForNewBlocks()
|
||||
})
|
||||
newBlockTimer.Stop()
|
||||
|
||||
return &Syncmgr{
|
||||
syncmode: headersMode,
|
||||
cfg: cfg,
|
||||
timer: newBlockTimer,
|
||||
nextBlockIndex: nextBlockIndex,
|
||||
}
|
||||
}
|
||||
|
||||
// OnHeader is called when the node receives a headers message
|
||||
func (s *Syncmgr) OnHeader(peer SyncPeer, msg *payload.HeadersMessage) error {
|
||||
|
||||
// XXX(Optimisation): First check if we actually need these headers
|
||||
// Check the last header in msg and then check what our latest header that was saved is
|
||||
// If our latest header is above the lastHeader, then we do not save it
|
||||
// We could also have that our latest header is above only some of the headers.
|
||||
// In this case, we should remove the headers that we already have
|
||||
|
||||
if len(msg.Headers) == 0 {
|
||||
// XXX: Increment banScore for this peer, for sending empty headers message
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
switch s.syncmode {
|
||||
case headersMode:
|
||||
err = s.headersModeOnHeaders(peer, msg.Headers)
|
||||
case blockMode:
|
||||
err = s.blockModeOnHeaders(peer, msg.Headers)
|
||||
case normalMode:
|
||||
err = s.normalModeOnHeaders(peer, msg.Headers)
|
||||
default:
|
||||
err = s.headersModeOnHeaders(peer, msg.Headers)
|
||||
}
|
||||
|
||||
// XXX(Kev): The only meaningful error here would be if the peer
|
||||
// we re-requested blocks from failed. In the next iteration, this will be handled
|
||||
// by the peer manager, who will only return an error, if we are connected to no peers.
|
||||
// Upon realising this, the node will then send out GetAddresses to the network and
|
||||
// syncing will be resumed, once we find peers to connect to.
|
||||
|
||||
hdr := msg.Headers[len(msg.Headers)-1]
|
||||
fmt.Printf("Finished processing headers. LastHash in set was: %s\n ", hdr.Hash.ReverseString())
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// OnBlock is called when the node receives a block
|
||||
func (s *Syncmgr) OnBlock(peer SyncPeer, msg *payload.BlockMessage) error {
|
||||
fmt.Printf("Block received with height %d\n", msg.Block.Index)
|
||||
|
||||
var err error
|
||||
|
||||
switch s.syncmode {
|
||||
case headersMode:
|
||||
err = s.headersModeOnBlock(peer, msg.Block)
|
||||
case blockMode:
|
||||
err = s.blockModeOnBlock(peer, msg.Block)
|
||||
case normalMode:
|
||||
err = s.normalModeOnBlock(peer, msg.Block)
|
||||
default:
|
||||
err = s.headersModeOnBlock(peer, msg.Block)
|
||||
}
|
||||
|
||||
fmt.Printf("Processed Block with height %d\n", msg.Block.Index)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//IsCurrent returns true if the node is currently
|
||||
// synced up with the network
|
||||
func (s *Syncmgr) IsCurrent() bool {
|
||||
return s.syncmode == normalMode
|
||||
}
|
||||
|
||||
func (s *Syncmgr) processBlock(block payload.Block) error {
|
||||
err := s.cfg.ProcessBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.nextBlockIndex++
|
||||
|
||||
return nil
|
||||
}
|
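A hypothetical in-package sketch of the life-cycle the server drives on the sync manager above; the Config callbacks would normally be the server methods wired up in server/syncmgr.go earlier in this change, stubbed out here with no-ops.

// exampleSyncLifecycle is a hypothetical helper inside package syncmgr.
func exampleSyncLifecycle(peer SyncPeer, hdrs *payload.HeadersMessage, blk *payload.BlockMessage) error {
	cfg := &Config{
		ProcessBlock:      func(payload.Block) error { return nil },
		ProcessHeaders:    func([]*payload.BlockBase) error { return nil },
		RequestHeaders:    func(util.Uint256) error { return nil },
		RequestBlock:      func(util.Uint256, uint32) error { return nil },
		GetNextBlockHash:  func() (util.Uint256, error) { return util.Uint256{}, nil },
		AskForNewBlocks:   func() {},
		FetchHeadersAgain: func(util.Uint256) error { return nil },
		FetchBlockAgain:   func(util.Uint256) error { return nil },
	}

	// Start in headersMode, looking for the block at height 1.
	s := New(cfg, 1)

	// Headers arrive first, then the matching blocks.
	if err := s.OnHeader(peer, hdrs); err != nil {
		return err
	}
	if err := s.OnBlock(peer, blk); err != nil {
		return err
	}

	// IsCurrent reports true once the manager has switched to normalMode.
	_ = s.IsCurrent()
	return nil
}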
97
_pkg.dev/syncmgr/syncmgr_onblock_test.go
Normal file
97
_pkg.dev/syncmgr/syncmgr_onblock_test.go
Normal file
|
@ -0,0 +1,97 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/payload"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHeadersModeOnBlock(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(headersMode, 0)
|
||||
|
||||
syncmgr.OnBlock(&mockPeer{}, randomBlockMessage(t, 0))
|
||||
|
||||
// In headerMode, we do nothing
|
||||
assert.Equal(t, 0, helper.blocksProcessed)
|
||||
}
|
||||
|
||||
func TestBlockModeOnBlock(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(blockMode, 0)
|
||||
|
||||
syncmgr.OnBlock(&mockPeer{}, randomBlockMessage(t, 0))
|
||||
|
||||
// When a block is received in blockMode, it is processed
|
||||
assert.Equal(t, 1, helper.blocksProcessed)
|
||||
}
|
||||
func TestNormalModeOnBlock(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(normalMode, 0)
|
||||
|
||||
syncmgr.OnBlock(&mockPeer{}, randomBlockMessage(t, 0))
|
||||
|
||||
// When a block is received in normal, it is processed
|
||||
assert.Equal(t, 1, helper.blocksProcessed)
|
||||
}
|
||||
|
||||
func TestBlockModeToNormalMode(t *testing.T) {
|
||||
|
||||
syncmgr, _ := setupSyncMgr(blockMode, 100)
|
||||
|
||||
peer := &mockPeer{
|
||||
height: 100,
|
||||
}
|
||||
|
||||
blkMessage := randomBlockMessage(t, 100)
|
||||
|
||||
syncmgr.OnBlock(peer, blkMessage)
|
||||
|
||||
// We should switch to normal mode, since the block
|
||||
// we received is close to the height of the peer. See cruiseHeight
|
||||
assert.Equal(t, normalMode, syncmgr.syncmode)
|
||||
|
||||
}
|
||||
func TestBlockModeStayInBlockMode(t *testing.T) {
|
||||
|
||||
syncmgr, _ := setupSyncMgr(blockMode, 0)
|
||||
|
||||
// We need our latest known hash to not be equal to the hash
|
||||
// of the block we received, to stay in blockmode
|
||||
syncmgr.headerHash = randomUint256(t)
|
||||
|
||||
peer := &mockPeer{
|
||||
height: 2000,
|
||||
}
|
||||
|
||||
blkMessage := randomBlockMessage(t, 100)
|
||||
|
||||
syncmgr.OnBlock(peer, blkMessage)
|
||||
|
||||
// We should stay in block mode, since the block we received is
|
||||
// still quite far behind the peers height
|
||||
assert.Equal(t, blockMode, syncmgr.syncmode)
|
||||
}
|
||||
func TestBlockModeAlreadyExistsErr(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(blockMode, 100)
|
||||
helper.err = chain.ErrBlockAlreadyExists
|
||||
|
||||
syncmgr.OnBlock(&mockPeer{}, randomBlockMessage(t, 100))
|
||||
|
||||
assert.Equal(t, 0, helper.blockFetchRequest)
|
||||
|
||||
// If we get a block already exists error in blockMode, then we
|
||||
// switch back to headers mode.
|
||||
assert.Equal(t, headersMode, syncmgr.syncmode)
|
||||
}
|
||||
|
||||
func randomBlockMessage(t *testing.T, height uint32) *payload.BlockMessage {
|
||||
blockMessage, err := payload.NewBlockMessage()
|
||||
blockMessage.BlockBase.Index = height
|
||||
assert.Nil(t, err)
|
||||
return blockMessage
|
||||
}
|
117
_pkg.dev/syncmgr/syncmgr_onheaders_test.go
Normal file
117
_pkg.dev/syncmgr/syncmgr_onheaders_test.go
Normal file
|
@ -0,0 +1,117 @@
|
|||
package syncmgr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/chain"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
func TestHeadersModeOnHeaders(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(headersMode, 0)
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 0))
|
||||
|
||||
// Since there were no headers, we should have exited early and processed nothing
|
||||
assert.Equal(t, 0, helper.headersProcessed)
|
||||
|
||||
// ProcessHeaders should have been called once to process all 100 headers
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 100))
|
||||
assert.Equal(t, 100, helper.headersProcessed)
|
||||
|
||||
// Mode should now be blockMode
|
||||
assert.Equal(t, blockMode, syncmgr.syncmode)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockModeOnHeaders(t *testing.T) {
|
||||
syncmgr, helper := setupSyncMgr(blockMode, 0)
|
||||
|
||||
// If we receive a header in blockmode, no headers will be processed
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 100))
|
||||
assert.Equal(t, 0, helper.headersProcessed)
|
||||
}
|
||||
func TestNormalModeOnHeadersMaxHeaders(t *testing.T) {
|
||||
syncmgr, helper := setupSyncMgr(normalMode, 0)
|
||||
|
||||
// If we receive a header in normalmode, headers will be processed
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 2000))
|
||||
assert.Equal(t, 2000, helper.headersProcessed)
|
||||
|
||||
// Mode should now be headersMode since we received 2000 headers
|
||||
assert.Equal(t, headersMode, syncmgr.syncmode)
|
||||
}
|
||||
|
||||
// This differs from the previous function in that
|
||||
// we did not receive the maximum number of headers
|
||||
func TestNormalModeOnHeaders(t *testing.T) {
|
||||
syncmgr, helper := setupSyncMgr(normalMode, 0)
|
||||
|
||||
// If we receive a header in normalmode, headers will be processed
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 200))
|
||||
assert.Equal(t, 200, helper.headersProcessed)
|
||||
|
||||
// Because we did not receive 2000 headers, we switch to blockMode
|
||||
assert.Equal(t, blockMode, syncmgr.syncmode)
|
||||
}
|
||||
|
||||
func TestLastHeaderUpdates(t *testing.T) {
|
||||
syncmgr, _ := setupSyncMgr(headersMode, 0)
|
||||
|
||||
hdrsMessage := randomHeadersMessage(t, 200)
|
||||
hdrs := hdrsMessage.Headers
|
||||
lastHeader := hdrs[len(hdrs)-1]
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, hdrsMessage)
|
||||
|
||||
// Headers are processed in headersMode
|
||||
// Last header should be updated
|
||||
assert.True(t, syncmgr.headerHash.Equals(lastHeader.Hash))
|
||||
|
||||
// Change mode to blockMode and reset lastHeader
|
||||
syncmgr.syncmode = blockMode
|
||||
syncmgr.headerHash = util.Uint256{}
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, hdrsMessage)
|
||||
|
||||
// header should not be changed
|
||||
assert.False(t, syncmgr.headerHash.Equals(lastHeader.Hash))
|
||||
|
||||
// Change mode to normalMode and reset lastHeader
|
||||
syncmgr.syncmode = normalMode
|
||||
syncmgr.headerHash = util.Uint256{}
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, hdrsMessage)
|
||||
|
||||
// headers are processed in normalMode
|
||||
// hash should be updated
|
||||
assert.True(t, syncmgr.headerHash.Equals(lastHeader.Hash))
|
||||
|
||||
}
|
||||
|
||||
func TestHeadersModeOnHeadersErr(t *testing.T) {
|
||||
|
||||
syncmgr, helper := setupSyncMgr(headersMode, 0)
|
||||
helper.err = &chain.ValidationError{}
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 200))
|
||||
|
||||
// On a validation error, we should request another peer
|
||||
// to send us these headers
|
||||
assert.Equal(t, 1, helper.headersFetchRequest)
|
||||
}
|
||||
|
||||
func TestNormalModeOnHeadersErr(t *testing.T) {
|
||||
syncmgr, helper := setupSyncMgr(normalMode, 0)
|
||||
helper.err = &chain.ValidationError{}
|
||||
|
||||
syncmgr.OnHeader(&mockPeer{}, randomHeadersMessage(t, 200))
|
||||
|
||||
// On a validation error, we should request another peer
|
||||
// to send us these headers
|
||||
assert.Equal(t, 1, helper.headersFetchRequest)
|
||||
}
|
81
_pkg.dev/vm/csharp-interop-test/push/pushbytes1.json
Normal file
|
@ -0,0 +1,81 @@
|
|||
{
|
||||
"category": "Push",
|
||||
"name": "PUSHBYTES1",
|
||||
"tests":
|
||||
[
|
||||
{
|
||||
"name": "Good definition",
|
||||
"script": "0x0100",
|
||||
"steps":
|
||||
[
|
||||
{
|
||||
"actions":
|
||||
[
|
||||
"StepInto"
|
||||
],
|
||||
"result":
|
||||
{
|
||||
"state": "Break",
|
||||
"invocationStack":
|
||||
[
|
||||
{
|
||||
"scriptHash": "0xFBC22D517F38E7612798ECE8E5957CF6C41D8CAF",
|
||||
"instructionPointer": 2,
|
||||
"nextInstruction": "RET",
|
||||
"evaluationStack":
|
||||
[
|
||||
{
|
||||
"type": "ByteArray",
|
||||
"value": "0x00"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"actions":
|
||||
[
|
||||
"StepInto"
|
||||
],
|
||||
"result":
|
||||
{
|
||||
"state": "Halt",
|
||||
"resultStack":
|
||||
[
|
||||
{
|
||||
"type": "ByteArray",
|
||||
"value": "0x00"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Wrong definition (without enough length)",
|
||||
"script": "0x01",
|
||||
"steps":
|
||||
[
|
||||
{
|
||||
"actions":
|
||||
[
|
||||
"StepInto"
|
||||
],
|
||||
"result":
|
||||
{
|
||||
"state": "Fault",
|
||||
"invocationStack":
|
||||
[
|
||||
{
|
||||
"scriptHash": "0xC51B66BCED5E4491001BD702669770DCCF440982",
|
||||
"instructionPointer": 1,
|
||||
"nextInstruction": "RET"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
6
_pkg.dev/vm/csharp-interop-test/readme.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
## Package VM Interop
|
||||
|
||||
|
||||
This package will use the tests in the neo-vm repo to test interoperability
|
||||
|
||||
|
26
_pkg.dev/vm/csharp-interop-test/testStruct.go
Normal file
|
@ -0,0 +1,26 @@
|
|||
package csharpinterop
|
||||
|
||||
// VMUnitTest is a struct for capturing the fields in the json files
|
||||
type VMUnitTest struct {
|
||||
Category string `json:"category"`
|
||||
Name string `json:"name"`
|
||||
Tests []struct {
|
||||
Name string `json:"name"`
|
||||
Script string `json:"script"`
|
||||
Steps []struct {
|
||||
Actions []string `json:"actions"`
|
||||
Result struct {
|
||||
State string `json:"state"`
|
||||
InvocationStack []struct {
|
||||
ScriptHash string `json:"scriptHash"`
|
||||
InstructionPointer int `json:"instructionPointer"`
|
||||
NextInstruction string `json:"nextInstruction"`
|
||||
EvaluationStack []struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
} `json:"evaluationStack"`
|
||||
} `json:"invocationStack"`
|
||||
} `json:"result"`
|
||||
} `json:"steps"`
|
||||
} `json:"tests"`
|
||||
}
|
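To show how these pieces fit together, here is a minimal sketch (not part of this diff; the loadVMUnitTest helper name and the use of ioutil are assumptions for illustration) of decoding a fixture such as pushbytes1.json above into the VMUnitTest struct:

package csharpinterop

import (
	"encoding/json"
	"io/ioutil"
)

// loadVMUnitTest is a hypothetical helper: it reads a csharp-interop JSON
// fixture from disk and decodes it into the VMUnitTest struct defined above.
func loadVMUnitTest(path string) (*VMUnitTest, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var ut VMUnitTest
	if err := json.Unmarshal(data, &ut); err != nil {
		return nil, err
	}
	return &ut, nil
}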
202
_pkg.dev/vm/stack/Int.go
Normal file
|
@ -0,0 +1,202 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Int represents an integer on the stack
|
||||
type Int struct {
|
||||
*abstractItem
|
||||
val *big.Int
|
||||
}
|
||||
|
||||
// NewInt will convert a big integer into
|
||||
// a StackInteger
|
||||
func NewInt(val *big.Int) (*Int, error) {
|
||||
return &Int{
|
||||
abstractItem: &abstractItem{},
|
||||
val: val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Equal will check if two integers hold equal value
|
||||
func (i *Int) Equal(s *Int) bool {
|
||||
return i.val.Cmp(s.val) == 0
|
||||
}
|
||||
|
||||
// Add will add two stackIntegers together
|
||||
func (i *Int) Add(s *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Add(i.val, s.val),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Sub will subtract two stackIntegers together
|
||||
func (i *Int) Sub(s *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Sub(i.val, s.val),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Mul will multiply two stackIntegers together
|
||||
func (i *Int) Mul(s *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Mul(i.val, s.val),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Div will divide one stackInteger by another.
|
||||
func (i *Int) Div(s *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Div(i.val, s.val),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Mod will take the mod of two stackIntegers together
|
||||
func (i *Int) Mod(s *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Mod(i.val, s.val),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Rsh will shift the integer i to the right by `n` bits
|
||||
func (i *Int) Rsh(n *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Rsh(i.val, uint(n.val.Int64())),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Lsh will shift the integer i to the left by `n` bits
|
||||
func (i *Int) Lsh(n *Int) (*Int, error) {
|
||||
return &Int{
|
||||
val: new(big.Int).Lsh(i.val, uint(n.val.Int64())),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Integer will override the default implementation
|
||||
// to allow go to cast this item as an integer.
|
||||
func (i *Int) Integer() (*Int, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// ByteArray overrides the default ByteArray method
|
||||
// to convert an Integer into a ByteArray
|
||||
func (i *Int) ByteArray() (*ByteArray, error) {
|
||||
b := i.val.Bytes()
|
||||
dest := reverse(b)
|
||||
return NewByteArray(dest), nil
|
||||
}
|
||||
|
||||
//Boolean overrides the default Boolean method
|
||||
// to convert an Integer into a Boolean StackItem
|
||||
func (i *Int) Boolean() (*Boolean, error) {
|
||||
boolean := (i.val.Int64() != 0)
|
||||
return NewBoolean(boolean), nil
|
||||
}
|
||||
|
||||
//Value returns the underlying big.Int
|
||||
func (i *Int) Value() *big.Int {
|
||||
return i.val
|
||||
}
|
||||
|
||||
// Abs returns a stack integer whose underlying value is
|
||||
// the absolute value of the original stack integer.
|
||||
func (i *Int) Abs() (*Int, error) {
|
||||
a := big.NewInt(0).Abs(i.Value())
|
||||
b, err := NewInt(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Lte returns a bool value from the comparison of two integers, a and b.
|
||||
// value is true if a <= b.
|
||||
// value is false if a > b.
|
||||
func (i *Int) Lte(s *Int) bool {
|
||||
return i.Value().Cmp(s.Value()) != 1
|
||||
}
|
||||
|
||||
// Gte returns a bool value from the comparison of two integers, a and b.
|
||||
// value is true if a >= b.
|
||||
// value is false if a < b.
|
||||
func (i *Int) Gte(s *Int) bool {
|
||||
return i.Value().Cmp(s.Value()) != -1
|
||||
}
|
||||
|
||||
// Lt returns a bool value from the comparison of two integers, a and b.
|
||||
// value is true if a < b.
|
||||
// value is false if a >= b.
|
||||
func (i *Int) Lt(s *Int) bool {
|
||||
return i.Value().Cmp(s.Value()) == -1
|
||||
}
|
||||
|
||||
// Gt returns a bool value from the comparison of two integers, a and b.
|
||||
// value is true if a > b.
|
||||
// value is false if a <= b.
|
||||
func (i *Int) Gt(s *Int) bool {
|
||||
return i.Value().Cmp(s.Value()) == 1
|
||||
}
|
||||
|
||||
// Invert returns an Integer whose underlying value is the bitwise complement
|
||||
// of the original value.
|
||||
func (i *Int) Invert() (*Int, error) {
|
||||
res := new(big.Int).Not(i.Value())
|
||||
return NewInt(res)
|
||||
}
|
||||
|
||||
// And returns an Integer whose underlying value is the result of the
|
||||
// application of the bitwise AND operator to the two original integers'
|
||||
// values.
|
||||
func (i *Int) And(s *Int) (*Int, error) {
|
||||
res := new(big.Int).And(i.Value(), s.Value())
|
||||
return NewInt(res)
|
||||
}
|
||||
|
||||
// Or returns an Integer whose underlying value is the result of the
|
||||
// application of the bitwise OR operator to the two original integers'
|
||||
// values.
|
||||
func (i *Int) Or(s *Int) (*Int, error) {
|
||||
res := new(big.Int).Or(i.Value(), s.Value())
|
||||
return NewInt(res)
|
||||
}
|
||||
|
||||
// Xor returns an Integer whose underlying value is the result of the
|
||||
// application of the bitwise XOR operator to the two original integers'
|
||||
// values.
|
||||
func (i *Int) Xor(s *Int) (*Int, error) {
|
||||
res := new(big.Int).Xor(i.Value(), s.Value())
|
||||
return NewInt(res)
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (i *Int) Hash() (string, error) {
|
||||
data := fmt.Sprintf("%T %v", i, i.Value())
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
||||
|
||||
// Min returns the minimum of two integers.
|
||||
func Min(a *Int, b *Int) *Int {
|
||||
if a.Lte(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Max returns the maximum of two integers.
|
||||
func Max(a *Int, b *Int) *Int {
|
||||
if a.Gte(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Within returns a bool whose value is true
|
||||
// iff the value of the integer i is within the specified
|
||||
// range [a,b) (left-inclusive).
|
||||
func (i *Int) Within(a *Int, b *Int) bool {
|
||||
// i >= a && i < b
|
||||
return i.Gte(a) && i.Lt(b)
|
||||
}
|
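As a small illustration of the left-inclusive [a,b) semantics of Within documented above, a hypothetical Example-style snippet (not part of this diff, assumed to live in the stack package) could look like this: 5 lies inside [5,10) while 10 does not.

package stack

import (
	"fmt"
	"math/big"
)

// ExampleInt_Within demonstrates that Within is left-inclusive: the lower
// bound is part of the range, the upper bound is not.
func ExampleInt_Within() {
	five, _ := NewInt(big.NewInt(5))
	ten, _ := NewInt(big.NewInt(10))
	fmt.Println(five.Within(five, ten), ten.Within(five, ten))
	// Output: true false
}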
24
_pkg.dev/vm/stack/Readme.md
Normal file
|
@ -0,0 +1,24 @@
|
|||
## VM - Stack
|
||||
|
||||
- How do I implement a new StackItem?
|
||||
|
||||
Answer: You add its type to the Item interface, then implement the default return method on the abstract stack item; this is the behaviour of any stack item that is not the new type. Then you embed the abstract item in the new struct and override the method.
|
||||
|
||||
For example, if I wanted to add a new type called `HashMap`:
|
||||
|
||||
type Item interface{
|
||||
HashMap()(*HashMap, error)
|
||||
}
|
||||
|
||||
func (a *abstractItem) HashMap() (*HashMap, error) {
|
||||
return nil, errors.New("This stack item is not a hashmap")
|
||||
}
|
||||
|
||||
type HashMap struct {
|
||||
*abstractItem
|
||||
// Variables needed for hashmap
|
||||
}
|
||||
|
||||
func (h *HashMap) HashMap()(*HashMap, error) {
|
||||
// logic to override default behaviour
|
||||
}
|
36
_pkg.dev/vm/stack/array.go
Normal file
|
@ -0,0 +1,36 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Array represents an Array of stackItems on the stack
|
||||
type Array struct {
|
||||
*abstractItem
|
||||
val []Item
|
||||
}
|
||||
|
||||
// Array overrides the default implementation
|
||||
// by the abstractItem, returning an Array struct
|
||||
func (a *Array) Array() (*Array, error) {
|
||||
return a, nil
|
||||
}
|
||||
|
||||
//Value returns the underlying Array's value
|
||||
func (a *Array) Value() []Item {
|
||||
return a.val
|
||||
}
|
||||
|
||||
// NewArray returns a new Array.
|
||||
func NewArray(val []Item) (*Array, error) {
|
||||
return &Array{
|
||||
&abstractItem{},
|
||||
val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (a *Array) Hash() (string, error) {
|
||||
data := fmt.Sprintf("%T %v", a, a.Value())
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
16
_pkg.dev/vm/stack/array_test.go
Normal file
|
@ -0,0 +1,16 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
// it's a stub at the moment, but will need it anyway
|
||||
// "github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestArray(t *testing.T) {
|
||||
var a Item = testMakeStackInt(t, 3)
|
||||
var b Item = testMakeStackInt(t, 6)
|
||||
var c Item = testMakeStackInt(t, 9)
|
||||
var ta = testMakeArray(t, []Item{a, b, c})
|
||||
_ = ta
|
||||
}
|
56
_pkg.dev/vm/stack/boolean.go
Normal file
|
@ -0,0 +1,56 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Boolean represents a boolean value on the stack
|
||||
type Boolean struct {
|
||||
*abstractItem
|
||||
val bool
|
||||
}
|
||||
|
||||
//NewBoolean returns a new boolean stack item
|
||||
func NewBoolean(val bool) *Boolean {
|
||||
return &Boolean{
|
||||
&abstractItem{},
|
||||
val,
|
||||
}
|
||||
}
|
||||
|
||||
// Boolean overrides the default implementation
|
||||
// by the abstractItem, returning a Boolean struct
|
||||
func (b *Boolean) Boolean() (*Boolean, error) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Value returns the underlying boolean value
|
||||
func (b *Boolean) Value() bool {
|
||||
return b.val
|
||||
}
|
||||
|
||||
// Not returns a Boolean whose underlying value is flipped.
|
||||
// If the value is True, it is flipped to False and vice versa.
|
||||
func (b *Boolean) Not() *Boolean {
|
||||
return NewBoolean(!b.Value())
|
||||
}
|
||||
|
||||
// And returns a Boolean whose underlying value is obtained
|
||||
// by applying the && operator to two Booleans' values.
|
||||
func (b *Boolean) And(a *Boolean) *Boolean {
|
||||
c := b.Value() && a.Value()
|
||||
return NewBoolean(c)
|
||||
}
|
||||
|
||||
// Or returns a Boolean whose underlying value is obtained
|
||||
// by applying the || operator to two Booleans' values.
|
||||
func (b *Boolean) Or(a *Boolean) *Boolean {
|
||||
c := b.Value() || a.Value()
|
||||
return NewBoolean(c)
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (b *Boolean) Hash() (string, error) {
|
||||
data := fmt.Sprintf("%T %v", b, b.Value())
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
177
_pkg.dev/vm/stack/builder.go
Normal file
|
@ -0,0 +1,177 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/wire/util"
|
||||
)
|
||||
|
||||
// Builder follows the builder pattern and will be used to build scripts
|
||||
type Builder struct {
|
||||
w *bytes.Buffer
|
||||
err error
|
||||
}
|
||||
|
||||
// NewBuilder returns a new builder object
|
||||
func NewBuilder() *Builder {
|
||||
return &Builder{
|
||||
w: &bytes.Buffer{},
|
||||
err: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Bytes returns the byte representation of the built buffer
|
||||
func (br *Builder) Bytes() []byte {
|
||||
return br.w.Bytes()
|
||||
}
|
||||
|
||||
// Emit a VM Opcode with data to the given buffer.
|
||||
func (br *Builder) Emit(op Instruction, b []byte) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
br.err = br.w.WriteByte(byte(op))
|
||||
_, br.err = br.w.Write(b)
|
||||
return br
|
||||
}
|
||||
|
||||
// EmitOpcode emits a single VM Opcode to the given buffer.
|
||||
func (br *Builder) EmitOpcode(op Instruction) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
br.err = br.w.WriteByte(byte(op))
|
||||
return br
|
||||
}
|
||||
|
||||
// EmitBool emits a bool type to the given buffer.
|
||||
func (br *Builder) EmitBool(ok bool) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
op := PUSHT
|
||||
if !ok {
|
||||
op = PUSHF
|
||||
}
|
||||
return br.EmitOpcode(op)
|
||||
}
|
||||
|
||||
// EmitInt emits an int type to the given buffer.
|
||||
func (br *Builder) EmitInt(i int64) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
if i == -1 {
|
||||
return br.EmitOpcode(PUSHM1)
|
||||
}
|
||||
if i == 0 {
|
||||
return br.EmitOpcode(PUSHF)
|
||||
}
|
||||
if i > 0 && i < 16 {
|
||||
val := Instruction(int(PUSH1) - 1 + int(i))
|
||||
return br.EmitOpcode(val)
|
||||
}
|
||||
|
||||
bInt := big.NewInt(i)
|
||||
val := reverse(bInt.Bytes())
|
||||
return br.EmitBytes(val)
|
||||
}
|
||||
|
||||
// EmitString emits a string to the given buffer.
|
||||
func (br *Builder) EmitString(s string) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
return br.EmitBytes([]byte(s))
|
||||
}
|
||||
|
||||
// EmitBytes emits a byte array to the given buffer.
|
||||
func (br *Builder) EmitBytes(b []byte) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
var (
|
||||
n = len(b)
|
||||
)
|
||||
|
||||
if n <= int(PUSHBYTES75) {
|
||||
return br.Emit(Instruction(n), b)
|
||||
} else if n < 0x100 {
|
||||
br.Emit(PUSHDATA1, []byte{byte(n)})
|
||||
} else if n < 0x10000 {
|
||||
buf := make([]byte, 2)
|
||||
binary.LittleEndian.PutUint16(buf, uint16(n))
|
||||
br.Emit(PUSHDATA2, buf)
|
||||
} else {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, uint32(n))
|
||||
br.Emit(PUSHDATA4, buf)
|
||||
}
|
||||
_, br.err = br.w.Write(b)
|
||||
return br
|
||||
}
|
||||
|
||||
// EmitSyscall emits the syscall API to the given buffer.
|
||||
// The syscall API string cannot be empty.
|
||||
func (br *Builder) EmitSyscall(api string) *Builder {
|
||||
if br.err != nil {
|
||||
return br
|
||||
}
|
||||
if len(api) == 0 {
|
||||
br.err = errors.New("syscall api cannot be of length 0")
|
||||
}
|
||||
buf := make([]byte, len(api)+1)
|
||||
buf[0] = byte(len(api))
|
||||
copy(buf[1:], []byte(api))
|
||||
return br.Emit(SYSCALL, buf)
|
||||
}
|
||||
|
||||
// EmitCall emits a call Opcode with label to the given buffer.
|
||||
func (br *Builder) EmitCall(op Instruction, label int16) *Builder {
|
||||
return br.EmitJmp(op, label)
|
||||
}
|
||||
|
||||
// EmitJmp emits a jump Opcode along with label to the given buffer.
|
||||
func (br *Builder) EmitJmp(op Instruction, label int16) *Builder {
|
||||
if !isOpcodeJmp(op) {
|
||||
br.err = fmt.Errorf("opcode %d is not a jump or call type", op)
|
||||
}
|
||||
buf := make([]byte, 2)
|
||||
binary.LittleEndian.PutUint16(buf, uint16(label))
|
||||
return br.Emit(op, buf)
|
||||
}
|
||||
|
||||
// EmitAppCall emits an appcall; if tailCall is true, the TAILCALL opcode will be
|
||||
// emitted instead.
|
||||
func (br *Builder) EmitAppCall(scriptHash util.Uint160, tailCall bool) *Builder {
|
||||
op := APPCALL
|
||||
if tailCall {
|
||||
op = TAILCALL
|
||||
}
|
||||
return br.Emit(op, scriptHash.Bytes())
|
||||
}
|
||||
|
||||
// EmitAppCallWithOperationAndData emits an appcall with the given operation and data.
|
||||
func (br *Builder) EmitAppCallWithOperationAndData(w *bytes.Buffer, scriptHash util.Uint160, operation string, data []byte) *Builder {
|
||||
br.EmitBytes(data)
|
||||
br.EmitString(operation)
|
||||
return br.EmitAppCall(scriptHash, false)
|
||||
}
|
||||
|
||||
// EmitAppCallWithOperation emits an appcall with the given operation.
|
||||
func (br *Builder) EmitAppCallWithOperation(scriptHash util.Uint160, operation string) *Builder {
|
||||
br.EmitBool(false)
|
||||
br.EmitString(operation)
|
||||
return br.EmitAppCall(scriptHash, false)
|
||||
}
|
||||
|
||||
func isOpcodeJmp(op Instruction) bool {
|
||||
if op == JMP || op == JMPIFNOT || op == JMPIF || op == CALL {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
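For orientation, here is a minimal sketch (not part of this diff, assumed to sit in the stack package) of how the fluent, error-latching Builder above can be chained; with the opcode values defined later in instruction.go, PUSH2, PUSH3 and ADD encode to 0x52, 0x53 and 0x93.

package stack

import "fmt"

// ExampleBuilder chains several emits; any error is latched inside the
// builder, so later calls simply become no-ops.
func ExampleBuilder() {
	b := NewBuilder()
	b.EmitInt(2).EmitInt(3).EmitOpcode(ADD)
	fmt.Printf("% x\n", b.Bytes())
	// Output: 52 53 93
}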
83
_pkg.dev/vm/stack/bytearray.go
Normal file
|
@ -0,0 +1,83 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ByteArray represents a slice of bytes on the stack
|
||||
type ByteArray struct {
|
||||
*abstractItem
|
||||
val []byte
|
||||
}
|
||||
|
||||
//NewByteArray returns a ByteArray stack item
|
||||
// given a byte slice
|
||||
func NewByteArray(val []byte) *ByteArray {
|
||||
return &ByteArray{
|
||||
&abstractItem{},
|
||||
val,
|
||||
}
|
||||
}
|
||||
|
||||
//ByteArray overrides the default abstractItem ByteArray method
|
||||
func (ba *ByteArray) ByteArray() (*ByteArray, error) {
|
||||
return ba, nil
|
||||
}
|
||||
|
||||
//Equals returns a true Boolean if two ByteArrays are equal
|
||||
func (ba *ByteArray) Equals(other *ByteArray) *Boolean {
|
||||
// If either are nil, return false
|
||||
if ba == nil || other == nil {
|
||||
return NewBoolean(false)
|
||||
}
|
||||
return NewBoolean(bytes.Equal(ba.val, other.val))
|
||||
}
|
||||
|
||||
//Integer overrides the default Integer method to convert a
|
||||
// ByteArray into an Integer
|
||||
func (ba *ByteArray) Integer() (*Int, error) {
|
||||
dest := reverse(ba.val)
|
||||
integerVal := new(big.Int).SetBytes(dest)
|
||||
return NewInt(integerVal)
|
||||
|
||||
}
|
||||
|
||||
// Boolean will convert a byte array into a boolean stack item
|
||||
func (ba *ByteArray) Boolean() (*Boolean, error) {
|
||||
boolean, err := strconv.ParseBool(string(ba.val))
|
||||
if err != nil {
|
||||
return nil, errors.New("cannot convert byte array to a boolean")
|
||||
}
|
||||
return NewBoolean(boolean), nil
|
||||
}
|
||||
|
||||
// XXX: move this into a pkg/util/slice folder
|
||||
// Go mod not working
|
||||
func reverse(b []byte) []byte {
|
||||
if len(b) < 2 {
|
||||
return b
|
||||
}
|
||||
|
||||
dest := make([]byte, len(b))
|
||||
|
||||
for i, j := 0, len(b)-1; i < j+1; i, j = i+1, j-1 {
|
||||
dest[i], dest[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
return dest
|
||||
}
|
||||
|
||||
//Value returns the underlying ByteArray's value.
|
||||
func (ba *ByteArray) Value() []byte {
|
||||
return ba.val
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (ba *ByteArray) Hash() (string, error) {
|
||||
data := fmt.Sprintf("%T %v", ba, ba.Value())
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
174
_pkg.dev/vm/stack/context.go
Normal file
|
@ -0,0 +1,174 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Context represents the current execution context of the VM.
|
||||
// A context is treated as a stack item and placed onto the invocation stack.
|
||||
type Context struct {
|
||||
*abstractItem
|
||||
|
||||
// Instruction pointer.
|
||||
ip int
|
||||
|
||||
// The raw program script.
|
||||
prog []byte
|
||||
|
||||
// Breakpoints
|
||||
breakPoints []int
|
||||
|
||||
// Evaluation Stack
|
||||
Estack RandomAccess
|
||||
|
||||
// Alternative Stack
|
||||
Astack RandomAccess
|
||||
}
|
||||
|
||||
// NewContext returns a new Context object.
|
||||
func NewContext(b []byte) *Context {
|
||||
return &Context{
|
||||
abstractItem: &abstractItem{},
|
||||
ip: -1,
|
||||
prog: b,
|
||||
breakPoints: []int{},
|
||||
}
|
||||
}
|
||||
|
||||
// Context overrides the default implementation
|
||||
// to return a context item
|
||||
func (c *Context) Context() (*Context, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Next returns the next instruction to execute.
|
||||
func (c *Context) Next() (Instruction, error) {
|
||||
c.ip++
|
||||
if c.ip >= len(c.prog) {
|
||||
return RET, errors.New("program pointer is more than the length of program. Returning RET OPCODE")
|
||||
}
|
||||
return Instruction(c.prog[c.ip]), nil
|
||||
}
|
||||
|
||||
// IP returns the absolute instruction pointer without taking 0 into account.
|
||||
// When the program starts, ip = 0 but IP() will return 1, because it is
|
||||
// the first instruction.
|
||||
func (c *Context) IP() int {
|
||||
return c.ip + 1
|
||||
}
|
||||
|
||||
// LenInstr returns the number of instructions loaded.
|
||||
func (c *Context) LenInstr() int {
|
||||
return len(c.prog)
|
||||
}
|
||||
|
||||
// CurrInstr returns the current instruction pointer and opcode.
|
||||
func (c *Context) CurrInstr() (int, Instruction) {
|
||||
if c.ip < 0 {
|
||||
return c.ip, Instruction(0x00)
|
||||
}
|
||||
return c.ip, Instruction(c.prog[c.ip])
|
||||
}
|
||||
|
||||
// Copy returns a new exact copy of c.
|
||||
func (c *Context) Copy() *Context {
|
||||
return &Context{
|
||||
ip: c.ip,
|
||||
prog: c.prog,
|
||||
breakPoints: c.breakPoints,
|
||||
}
|
||||
}
|
||||
|
||||
// Program returns the loaded program.
|
||||
func (c *Context) Program() []byte {
|
||||
return c.prog
|
||||
}
|
||||
|
||||
func (c *Context) atBreakPoint() bool {
|
||||
for _, n := range c.breakPoints {
|
||||
if n == c.ip {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Context) String() string {
|
||||
return "execution context"
|
||||
}
|
||||
|
||||
// ReadUint32 reads a uint32 from the script
|
||||
func (c *Context) ReadUint32() uint32 {
|
||||
start, end := c.IP(), c.IP()+4
|
||||
if end > len(c.prog) {
|
||||
return 0
|
||||
}
|
||||
val := binary.LittleEndian.Uint32(c.prog[start:end])
|
||||
c.ip += 4
|
||||
return val
|
||||
}
|
||||
|
||||
// ReadUint16 reads a uint16 from the script
|
||||
func (c *Context) ReadUint16() uint16 {
|
||||
start, end := c.IP(), c.IP()+2
|
||||
if end > len(c.prog) {
|
||||
return 0
|
||||
}
|
||||
val := binary.LittleEndian.Uint16(c.prog[start:end])
|
||||
c.ip += 2
|
||||
return val
|
||||
}
|
||||
|
||||
// ReadInt16 reads a int16 from the script
|
||||
func (c *Context) ReadInt16() int16 {
|
||||
return int16(c.ReadUint16())
|
||||
}
|
||||
|
||||
// ReadByte reads one byte from the script
|
||||
func (c *Context) ReadByte() (byte, error) {
|
||||
byt, err := c.ReadBytes(1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return byt[0], nil
|
||||
}
|
||||
|
||||
// ReadBytes will read n bytes from the context
|
||||
func (c *Context) ReadBytes(n int) ([]byte, error) {
|
||||
start, end := c.IP(), c.IP()+n
|
||||
if end > len(c.prog) {
|
||||
return nil, errors.New("Too many bytes to read, pointer goes past end of program")
|
||||
}
|
||||
|
||||
out := make([]byte, n)
|
||||
copy(out, c.prog[start:end])
|
||||
c.ip += n
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *Context) readVarBytes() ([]byte, error) {
|
||||
n, err := c.ReadByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.ReadBytes(int(n))
|
||||
}
|
||||
|
||||
// SetIP sets the instruction pointer ip to a given integer.
|
||||
// Returns an error if ip is less than -1 or greater than LenInstr.
|
||||
func (c *Context) SetIP(ip int) error {
|
||||
if ok := ip < -1 || ip > c.LenInstr(); ok {
|
||||
return errors.New("invalid instruction pointer")
|
||||
}
|
||||
c.ip = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (c *Context) Hash() (string, error) {
|
||||
data := c.String() + fmt.Sprintf(" %v-%v-%v-%v-%v", c.ip, c.prog, c.breakPoints, c.Estack, c.Astack)
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
133
_pkg.dev/vm/stack/instruction.go
Normal file
|
@ -0,0 +1,133 @@
|
|||
package stack
|
||||
|
||||
// Instruction represents an operation code in the NeoVM
|
||||
type Instruction byte
|
||||
|
||||
// Viable list of supported instruction constants.
|
||||
const (
|
||||
// Constants
|
||||
PUSH0 Instruction = 0x00
|
||||
PUSHF Instruction = PUSH0
|
||||
PUSHBYTES1 Instruction = 0x01
|
||||
PUSHBYTES75 Instruction = 0x4B
|
||||
PUSHDATA1 Instruction = 0x4C
|
||||
PUSHDATA2 Instruction = 0x4D
|
||||
PUSHDATA4 Instruction = 0x4E
|
||||
PUSHM1 Instruction = 0x4F
|
||||
PUSH1 Instruction = 0x51
|
||||
PUSHT Instruction = PUSH1
|
||||
PUSH2 Instruction = 0x52
|
||||
PUSH3 Instruction = 0x53
|
||||
PUSH4 Instruction = 0x54
|
||||
PUSH5 Instruction = 0x55
|
||||
PUSH6 Instruction = 0x56
|
||||
PUSH7 Instruction = 0x57
|
||||
PUSH8 Instruction = 0x58
|
||||
PUSH9 Instruction = 0x59
|
||||
PUSH10 Instruction = 0x5A
|
||||
PUSH11 Instruction = 0x5B
|
||||
PUSH12 Instruction = 0x5C
|
||||
PUSH13 Instruction = 0x5D
|
||||
PUSH14 Instruction = 0x5E
|
||||
PUSH15 Instruction = 0x5F
|
||||
PUSH16 Instruction = 0x60
|
||||
|
||||
// Flow control
|
||||
NOP Instruction = 0x61
|
||||
JMP Instruction = 0x62
|
||||
JMPIF Instruction = 0x63
|
||||
JMPIFNOT Instruction = 0x64
|
||||
CALL Instruction = 0x65
|
||||
RET Instruction = 0x66
|
||||
APPCALL Instruction = 0x67
|
||||
SYSCALL Instruction = 0x68
|
||||
TAILCALL Instruction = 0x69
|
||||
|
||||
// Stack
|
||||
DUPFROMALTSTACK Instruction = 0x6A
|
||||
TOALTSTACK Instruction = 0x6B
|
||||
FROMALTSTACK Instruction = 0x6C
|
||||
XDROP Instruction = 0x6D
|
||||
XSWAP Instruction = 0x72
|
||||
XTUCK Instruction = 0x73
|
||||
DEPTH Instruction = 0x74
|
||||
DROP Instruction = 0x75
|
||||
DUP Instruction = 0x76
|
||||
NIP Instruction = 0x77
|
||||
OVER Instruction = 0x78
|
||||
PICK Instruction = 0x79
|
||||
ROLL Instruction = 0x7A
|
||||
ROT Instruction = 0x7B
|
||||
SWAP Instruction = 0x7C
|
||||
TUCK Instruction = 0x7D
|
||||
|
||||
// Splice
|
||||
CAT Instruction = 0x7E
|
||||
SUBSTR Instruction = 0x7F
|
||||
LEFT Instruction = 0x80
|
||||
RIGHT Instruction = 0x81
|
||||
SIZE Instruction = 0x82
|
||||
|
||||
// Bitwise logic
|
||||
INVERT Instruction = 0x83
|
||||
AND Instruction = 0x84
|
||||
OR Instruction = 0x85
|
||||
XOR Instruction = 0x86
|
||||
EQUAL Instruction = 0x87
|
||||
|
||||
// Arithmetic
|
||||
INC Instruction = 0x8B
|
||||
DEC Instruction = 0x8C
|
||||
SIGN Instruction = 0x8D
|
||||
NEGATE Instruction = 0x8F
|
||||
ABS Instruction = 0x90
|
||||
NOT Instruction = 0x91
|
||||
NZ Instruction = 0x92
|
||||
ADD Instruction = 0x93
|
||||
SUB Instruction = 0x94
|
||||
MUL Instruction = 0x95
|
||||
DIV Instruction = 0x96
|
||||
MOD Instruction = 0x97
|
||||
SHL Instruction = 0x98
|
||||
SHR Instruction = 0x99
|
||||
BOOLAND Instruction = 0x9A
|
||||
BOOLOR Instruction = 0x9B
|
||||
NUMEQUAL Instruction = 0x9C
|
||||
NUMNOTEQUAL Instruction = 0x9E
|
||||
LT Instruction = 0x9F
|
||||
GT Instruction = 0xA0
|
||||
LTE Instruction = 0xA1
|
||||
GTE Instruction = 0xA2
|
||||
MIN Instruction = 0xA3
|
||||
MAX Instruction = 0xA4
|
||||
WITHIN Instruction = 0xA5
|
||||
|
||||
// Crypto
|
||||
SHA1 Instruction = 0xA7
|
||||
SHA256 Instruction = 0xA8
|
||||
HASH160 Instruction = 0xA9
|
||||
HASH256 Instruction = 0xAA
|
||||
CHECKSIG Instruction = 0xAC
|
||||
CHECKMULTISIG Instruction = 0xAE
|
||||
|
||||
// Array
|
||||
ARRAYSIZE Instruction = 0xC0
|
||||
PACK Instruction = 0xC1
|
||||
UNPACK Instruction = 0xC2
|
||||
PICKITEM Instruction = 0xC3
|
||||
SETITEM Instruction = 0xC4
|
||||
NEWARRAY Instruction = 0xC5
|
||||
NEWSTRUCT Instruction = 0xC6
|
||||
APPEND Instruction = 0xC8
|
||||
REVERSE Instruction = 0xC9
|
||||
REMOVE Instruction = 0xCA
|
||||
|
||||
// Exceptions
|
||||
THROW Instruction = 0xF0
|
||||
THROWIFNOT Instruction = 0xF1
|
||||
)
|
||||
|
||||
// Value returns the byte-value of the opcode.
|
||||
func (i Instruction) Value() byte {
|
||||
return byte(i)
|
||||
}
|
74
_pkg.dev/vm/stack/int_test.go
Normal file
|
@ -0,0 +1,74 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
a := testMakeStackInt(t, 10)
|
||||
b := testMakeStackInt(t, 20)
|
||||
expected := testMakeStackInt(t, 30)
|
||||
c, err := a.Add(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
func TestSub(t *testing.T) {
|
||||
a := testMakeStackInt(t, 30)
|
||||
b := testMakeStackInt(t, 200)
|
||||
expected := testMakeStackInt(t, 170)
|
||||
c, err := b.Sub(a)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
func TestMul(t *testing.T) {
|
||||
a := testMakeStackInt(t, 10)
|
||||
b := testMakeStackInt(t, 20)
|
||||
expected := testMakeStackInt(t, 200)
|
||||
c, err := a.Mul(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
func TestMod(t *testing.T) {
|
||||
a := testMakeStackInt(t, 10)
|
||||
b := testMakeStackInt(t, 20)
|
||||
expected := testMakeStackInt(t, 10)
|
||||
c, err := a.Mod(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
func TestLsh(t *testing.T) {
|
||||
a := testMakeStackInt(t, 23)
|
||||
b := testMakeStackInt(t, 8)
|
||||
expected := testMakeStackInt(t, 5888)
|
||||
c, err := a.Lsh(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
|
||||
func TestRsh(t *testing.T) {
|
||||
a := testMakeStackInt(t, 128)
|
||||
b := testMakeStackInt(t, 3)
|
||||
expected := testMakeStackInt(t, 16)
|
||||
c, err := a.Rsh(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, expected.Equal(c))
|
||||
}
|
||||
|
||||
func TestByteArrConversion(t *testing.T) {
|
||||
|
||||
var num int64 = 100000
|
||||
|
||||
a := testMakeStackInt(t, num)
|
||||
ba, err := a.ByteArray()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, num, testReadInt64(t, ba.val))
|
||||
|
||||
have, err := ba.Integer()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, num, have.val.Int64())
|
||||
|
||||
}
|
61
_pkg.dev/vm/stack/invocationstack.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
package stack
|
||||
|
||||
import "errors"
|
||||
|
||||
// Invocation embeds a Random Access stack
|
||||
// Providing helper methods for the context object
|
||||
type Invocation struct{ RandomAccess }
|
||||
|
||||
//NewInvocation will return a new
|
||||
// Invocation stack
|
||||
func NewInvocation() *Invocation {
|
||||
return &Invocation{
|
||||
RandomAccess{
|
||||
vals: make([]Item, 0, StackAverageSize),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Invocation) peekContext(n uint16) (*Context, error) {
|
||||
item, err := i.Peek(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return item.Context()
|
||||
}
|
||||
|
||||
// CurrentContext returns the current context on the invocation stack
|
||||
func (i *Invocation) CurrentContext() (*Context, error) {
|
||||
return i.peekContext(0)
|
||||
}
|
||||
|
||||
// PopCurrentContext Pops a context item from the top of the stack
|
||||
func (i *Invocation) PopCurrentContext() (*Context, error) {
|
||||
item, err := i.Pop()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctx, err := item.Context()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
// CallingContext will return the context item
|
||||
// that will be called next.
|
||||
func (i *Invocation) CallingContext() (*Context, error) {
|
||||
if i.Len() < 1 {
|
||||
return nil, errors.New("Length of invocation stack is < 1, no calling context")
|
||||
}
|
||||
return i.peekContext(1)
|
||||
}
|
||||
|
||||
// EntryContext will return the context item that
|
||||
// started the program
|
||||
func (i *Invocation) EntryContext() (*Context, error) {
|
||||
|
||||
// firstItemIndex refers to the first item that was pushed onto the stack
|
||||
firstItemIndex := uint16(i.Len() - 1) // N.B. if this overflows because len is zero, then an error will be returned
|
||||
return i.peekContext(firstItemIndex)
|
||||
}
|
166
_pkg.dev/vm/stack/map.go
Normal file
|
@ -0,0 +1,166 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/crypto/hash"
|
||||
)
|
||||
|
||||
// Map represents a map of key, value pairs on the stack.
|
||||
// Both key and value are stack Items.
|
||||
type Map struct {
|
||||
*abstractItem
|
||||
val map[Item]Item
|
||||
}
|
||||
|
||||
// NewMap returns a Map stack Item given
|
||||
// a map whose keys and values are stack Items.
|
||||
func NewMap(val map[Item]Item) (*Map, error) {
|
||||
return &Map{
|
||||
abstractItem: &abstractItem{},
|
||||
val: val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Map will override the default implementation
|
||||
// to allow go to cast this item as an Map.
|
||||
func (m *Map) Map() (*Map, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Boolean overrides the default Boolean method
|
||||
// to convert a Map into a Boolean StackItem
|
||||
func (m *Map) Boolean() (*Boolean, error) {
|
||||
return NewBoolean(true), nil
|
||||
}
|
||||
|
||||
// ContainsKey returns a boolean whose value is true
|
||||
// iff the underlying map value contains the given Item
|
||||
// as a key.
|
||||
func (m *Map) ContainsKey(key Item) (*Boolean, error) {
|
||||
for k := range m.Value() {
|
||||
if ok, err := CompareHash(k, key); err != nil {
|
||||
return nil, err
|
||||
} else if ok.Value() {
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
}
|
||||
return NewBoolean(false), nil
|
||||
}
|
||||
|
||||
// Value returns the underlying map's value
|
||||
func (m *Map) Value() map[Item]Item {
|
||||
return m.val
|
||||
}
|
||||
|
||||
// Remove removes the given key Item from the
|
||||
// underlying map's value.
|
||||
func (m *Map) Remove(key Item) error {
|
||||
var d Item
|
||||
for k := range m.Value() {
|
||||
if ok, err := CompareHash(k, key); err != nil {
|
||||
return err
|
||||
} else if ok.Value() {
|
||||
d = k
|
||||
}
|
||||
|
||||
}
|
||||
if d != nil {
|
||||
delete(m.Value(), d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add inserts a new key, value pair of Items into
|
||||
// the underlying map's value.
|
||||
func (m *Map) Add(key Item, value Item) error {
|
||||
for k := range m.Value() {
|
||||
if ok, err := CompareHash(k, key); err != nil {
|
||||
return err
|
||||
} else if ok.Value() {
|
||||
return errors.New("try to insert duplicate key! ")
|
||||
}
|
||||
}
|
||||
m.Value()[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueOfKey tries to get the value of the key Item
|
||||
// from the map's underlying value.
|
||||
func (m *Map) ValueOfKey(key Item) (Item, error) {
|
||||
for k, v := range m.Value() {
|
||||
if ok, err := CompareHash(k, key); err != nil {
|
||||
return nil, err
|
||||
} else if ok.Value() {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
|
||||
// Clear empties the underlying map's value.
|
||||
func (m *Map) Clear() {
|
||||
m.val = map[Item]Item{}
|
||||
}
|
||||
|
||||
// CompareHash compares the Hashes of two items.
|
||||
// If they are equal it returns a true boolean. Otherwise
|
||||
// it returns a false boolean. Items whose hashes are equal are
|
||||
// to be considered equal.
|
||||
func CompareHash(i1 Item, i2 Item) (*Boolean, error) {
|
||||
hash1, err := i1.Hash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash2, err := i2.Hash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hash1 == hash2 {
|
||||
return NewBoolean(true), nil
|
||||
}
|
||||
|
||||
return NewBoolean(false), nil
|
||||
}
|
||||
|
||||
// Hash overrides the default abstract hash method.
|
||||
func (m *Map) Hash() (string, error) {
|
||||
var hashSlice sort.StringSlice = []string{}
|
||||
var data = fmt.Sprintf("%T ", m)
|
||||
|
||||
for k, v := range m.Value() {
|
||||
hk, err := k.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hv, err := v.Hash()
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hashSlice = append(hashSlice, hk)
|
||||
hashSlice = append(hashSlice, hv)
|
||||
}
|
||||
hashSlice.Sort()
|
||||
|
||||
for _, h := range hashSlice {
|
||||
data += h
|
||||
}
|
||||
|
||||
return KeyGenerator([]byte(data))
|
||||
}
|
||||
|
||||
// KeyGenerator hashes a byte slice to obtain a unique identifier.
|
||||
func KeyGenerator(data []byte) (string, error) {
|
||||
h, err := hash.Sha256([]byte(data))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return h.String(), nil
|
||||
}
|
141
_pkg.dev/vm/stack/map_test.go
Normal file
|
@ -0,0 +1,141 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMap(t *testing.T) {
|
||||
// define Map m for testing
|
||||
var a Item = testMakeStackInt(t, 10)
|
||||
var b Item = NewBoolean(true)
|
||||
var c Item = NewByteArray([]byte{1, 2, 34})
|
||||
var d Item = testMakeStackMap(t, map[Item]Item{
|
||||
a: c,
|
||||
b: a,
|
||||
})
|
||||
var e = NewContext([]byte{1, 2, 3, 4})
|
||||
var f = testMakeArray(t, []Item{a, b})
|
||||
|
||||
val := map[Item]Item{
|
||||
a: c,
|
||||
b: a,
|
||||
c: b,
|
||||
d: a,
|
||||
e: d,
|
||||
f: e,
|
||||
}
|
||||
m := testMakeStackMap(t, val)
|
||||
|
||||
// test ValueOfKey
|
||||
valueA, _ := m.ValueOfKey(testMakeStackInt(t, 10))
|
||||
assert.Equal(t, c, valueA)
|
||||
|
||||
valueB, _ := m.ValueOfKey(b)
|
||||
assert.Equal(t, a, valueB)
|
||||
|
||||
valueC, _ := m.ValueOfKey(NewByteArray([]byte{1, 2, 34}))
|
||||
assert.Equal(t, b, valueC)
|
||||
|
||||
valueD, _ := m.ValueOfKey(testMakeStackMap(t, map[Item]Item{
|
||||
b: a,
|
||||
a: c,
|
||||
}))
|
||||
assert.Equal(t, a, valueD)
|
||||
|
||||
valueE, _ := m.ValueOfKey(NewContext([]byte{1, 2, 3, 4}))
|
||||
assert.Equal(t, d, valueE)
|
||||
|
||||
valueF, _ := m.ValueOfKey(testMakeArray(t, []Item{a, b}))
|
||||
assert.Equal(t, e, valueF)
|
||||
|
||||
valueX, _ := m.ValueOfKey(NewByteArray([]byte{1, 2, 35}))
|
||||
assert.NotEqual(t, b, valueX)
|
||||
|
||||
checkA, err := m.ContainsKey(a)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkA.Value())
|
||||
|
||||
//test ContainsKey
|
||||
checkB, err := m.ContainsKey(b)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkB.Value())
|
||||
|
||||
checkC, err := m.ContainsKey(c)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkC.Value())
|
||||
|
||||
checkD, err := m.ContainsKey(d)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkD.Value())
|
||||
|
||||
checkE, err := m.ContainsKey(e)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkE.Value())
|
||||
|
||||
//test CompareHash
|
||||
val2 := map[Item]Item{
|
||||
f: e,
|
||||
e: d,
|
||||
d: a,
|
||||
c: b,
|
||||
b: a,
|
||||
a: c,
|
||||
}
|
||||
m2 := testMakeStackMap(t, val2)
|
||||
checkMap, err := CompareHash(m, m2)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkMap.Value())
|
||||
|
||||
checkBoolean, err := CompareHash(b, NewBoolean(true))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkBoolean.Value())
|
||||
|
||||
checkByteArray, err := CompareHash(c, NewByteArray([]byte{1, 2, 34}))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkByteArray.Value())
|
||||
|
||||
checkContext, err := CompareHash(e, NewContext([]byte{1, 2, 3, 4}))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkContext.Value())
|
||||
|
||||
checkArray, err := CompareHash(f, testMakeArray(t, []Item{a, b}))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, checkArray.Value())
|
||||
}
|
||||
|
||||
func TestMapAdd(t *testing.T) {
|
||||
var a Item = testMakeStackInt(t, 10)
|
||||
var b Item = NewBoolean(true)
|
||||
var m = testMakeStackMap(t, map[Item]Item{})
|
||||
|
||||
err := m.Add(a, a)
|
||||
assert.Nil(t, err)
|
||||
err = m.Add(b, a)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(m.Value()))
|
||||
|
||||
expected := testMakeStackMap(t, map[Item]Item{b: a, a: a})
|
||||
check, err := CompareHash(m, expected)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, check.Value())
|
||||
|
||||
}
|
||||
|
||||
func TestMapRemove(t *testing.T) {
|
||||
var a Item = testMakeStackInt(t, 10)
|
||||
var b Item = NewBoolean(true)
|
||||
var m = testMakeStackMap(t, map[Item]Item{b: a, a: a})
|
||||
|
||||
err := m.Remove(a)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(m.Value()))
|
||||
|
||||
expected := testMakeStackMap(t, map[Item]Item{b: a})
|
||||
check, err := CompareHash(m, expected)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, check.Value())
|
||||
|
||||
}
|
186
_pkg.dev/vm/stack/stack.go
Normal file
|
@ -0,0 +1,186 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
// StackAverageSize is used to set the capacity of the stack
|
||||
// Setting this number too low will cause extra allocations.
|
||||
StackAverageSize = 20
|
||||
)
|
||||
|
||||
// RandomAccess represents a Random Access Stack
|
||||
type RandomAccess struct {
|
||||
vals []Item
|
||||
}
|
||||
|
||||
// New will return a new random access stack
|
||||
func New() *RandomAccess {
|
||||
return &RandomAccess{
|
||||
vals: make([]Item, 0, StackAverageSize),
|
||||
}
|
||||
}
|
||||
|
||||
//Len will return the length of the stack
|
||||
func (ras *RandomAccess) Len() int {
|
||||
if ras.vals == nil {
|
||||
return -1
|
||||
}
|
||||
return len(ras.vals)
|
||||
}
|
||||
|
||||
// Clear will remove all items in the stack
|
||||
func (ras *RandomAccess) Clear() {
|
||||
ras.vals = make([]Item, 0, StackAverageSize)
|
||||
}
|
||||
|
||||
// Pop will remove the last stack item that was added
|
||||
func (ras *RandomAccess) Pop() (Item, error) {
|
||||
if len(ras.vals) == 0 {
|
||||
return nil, errors.New("There are no items on the stack to pop")
|
||||
}
|
||||
if ras.vals == nil {
|
||||
return nil, errors.New("Cannot pop from a nil stack")
|
||||
}
|
||||
|
||||
l := len(ras.vals)
|
||||
item := ras.vals[l-1]
|
||||
ras.vals = ras.vals[:l-1]
|
||||
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// Push will put a stack item onto the top of the stack
|
||||
func (ras *RandomAccess) Push(item Item) *RandomAccess {
|
||||
if ras.vals == nil {
|
||||
ras.vals = make([]Item, 0, StackAverageSize)
|
||||
}
|
||||
|
||||
ras.vals = append(ras.vals, item)
|
||||
|
||||
return ras
|
||||
}
|
||||
|
||||
// Insert will push a stackItem onto the stack at position `n`
|
||||
// Note: index 0 is the top of the stack, which is the end of the slice
|
||||
func (ras *RandomAccess) Insert(n uint16, item Item) (*RandomAccess, error) {
|
||||
|
||||
if n == 0 {
|
||||
return ras.Push(item), nil
|
||||
}
|
||||
|
||||
if ras.vals == nil {
|
||||
ras.vals = make([]Item, 0, StackAverageSize)
|
||||
}
|
||||
|
||||
// Check that we are not inserting out of the bounds
|
||||
stackSize := uint16(len(ras.vals))
|
||||
if n > stackSize-1 {
|
||||
return nil, fmt.Errorf("Tried to insert at index %d when length of stack is %d", n, len(ras.vals))
|
||||
}
|
||||
|
||||
index := stackSize - n
|
||||
|
||||
ras.vals = append(ras.vals, item)
|
||||
copy(ras.vals[index:], ras.vals[index-1:])
|
||||
ras.vals[index] = item
|
||||
|
||||
return ras, nil
|
||||
}
|
||||
|
||||
// Peek will check an element at a given index
|
||||
// Note: 0 is the top of the stack, which is the end of the slice
|
||||
func (ras *RandomAccess) Peek(n uint16) (Item, error) {
|
||||
|
||||
stackSize := uint16(len(ras.vals))
|
||||
|
||||
	if ras.Len() < 1 {
		return nil, fmt.Errorf("cannot peek at a stack with no items, length of stack is %d", ras.Len())
	}

	if n == 0 {
		index := stackSize - 1
		return ras.vals[index], nil
	}
|
||||
|
||||
// Check that we are not peeking out of the bounds
|
||||
if n > stackSize-1 {
|
||||
return nil, fmt.Errorf("Tried to peek at index %d when length of stack is %d", n, len(ras.vals))
|
||||
}
|
||||
index := stackSize - n - 1
|
||||
|
||||
return ras.vals[index], nil
|
||||
}
|
||||
|
||||
// CopyTo will copy all of the stack items from `ras` into the stack that is passed as an argument
|
||||
// XXX: once maxstacksize is implemented, we will return error if size goes over
|
||||
// There will also be additional checks needed once stack isolation is added
|
||||
func (ras *RandomAccess) CopyTo(stack *RandomAccess) error {
|
||||
stack.vals = append(stack.vals, ras.vals...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set replaces the n-th item from the top of the stack with the new item.
// The item to replace is located at position "len(stack)-index-1".
|
||||
func (ras *RandomAccess) Set(index uint16, item Item) error {
|
||||
stackSize := uint16(len(ras.vals))
|
||||
if ok := index >= stackSize; ok {
|
||||
return errors.New("index out of range")
|
||||
}
|
||||
|
||||
n := stackSize - index - 1
|
||||
ras.vals[n] = item
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convenience Functions
|
||||
|
||||
// PopInt will remove the last stack item that was added
|
||||
// and cast it to an integer
|
||||
func (ras *RandomAccess) PopInt() (*Int, error) {
|
||||
item, err := ras.Pop()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return item.Integer()
|
||||
}
|
||||
|
||||
// PopByteArray will remove the last stack item that was added
|
||||
// and cast it to a ByteArray
|
||||
func (ras *RandomAccess) PopByteArray() (*ByteArray, error) {
|
||||
item, err := ras.Pop()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return item.ByteArray()
|
||||
}
|
||||
|
||||
// PopBoolean will remove the last stack item that was added
|
||||
// and cast it to a Boolean.
|
||||
func (ras *RandomAccess) PopBoolean() (*Boolean, error) {
|
||||
item, err := ras.Pop()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return item.Boolean()
|
||||
}
|
||||
|
||||
// Remove removes the n-th item from the stack
|
||||
// starting from the top of the stack. In other words
|
||||
// the n-th item to remove is located at the index "len(stack)-n-1"
|
||||
func (ras *RandomAccess) Remove(n uint16) (Item, error) {
|
||||
if int(n) >= len(ras.vals) {
|
||||
return nil, errors.New("index out of range")
|
||||
}
|
||||
|
||||
index := uint16(len(ras.vals)) - n - 1
|
||||
item := ras.vals[index]
|
||||
|
||||
ras.vals = append(ras.vals[:index], ras.vals[index+1:]...)
|
||||
|
||||
return item, nil
|
||||
}
|
161
_pkg.dev/vm/stack/stack_test.go
Normal file
|
@ -0,0 +1,161 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStackPushPop(t *testing.T) {
|
||||
// Create two stack Integers
|
||||
a, err := NewInt(big.NewInt(10))
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
b, err := NewInt(big.NewInt(20))
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// Create a new stack
|
||||
testStack := New()
|
||||
|
||||
// Push to stack
|
||||
testStack.Push(a).Push(b)
|
||||
|
||||
// There should only be two values on the stack
|
||||
assert.Equal(t, 2, testStack.Len())
|
||||
|
||||
// Pop first element and it should be equal to b
|
||||
stackElement, err := testStack.Pop()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
item, err := stackElement.Integer()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
assert.Equal(t, true, item.Equal(b))
|
||||
|
||||
// Pop second element and it should be equal to a
|
||||
stackElement, err = testStack.Pop()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
item, err = stackElement.Integer()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
assert.Equal(t, true, item.Equal(a))
|
||||
|
||||
// We should get an error as there are no more items left to pop
|
||||
_, err = testStack.Pop()
|
||||
assert.NotNil(t, err)
|
||||
|
||||
}
|
||||
|
||||
// For this test to pass, we should get an error when popping from a nil stack
|
||||
// and we should initialise and push an element if pushing to an empty stack
|
||||
func TestPushPopNil(t *testing.T) {
|
||||
|
||||
// stack is nil when initialised without New constructor
|
||||
testStack := RandomAccess{}
|
||||
|
||||
// Popping from nil stack
|
||||
// - should give an error
|
||||
// - element returned should be nil
|
||||
stackElement, err := testStack.Pop()
|
||||
assert.NotNil(t, err)
|
||||
assert.Nil(t, stackElement)
|
||||
|
||||
// stack should still be nil after failing to pop
|
||||
assert.Nil(t, testStack.vals)
|
||||
|
||||
// create a random test stack item
|
||||
a, err := NewInt(big.NewInt(2))
|
||||
assert.Nil(t, err)
|
||||
|
||||
// push random item to stack
|
||||
testStack.Push(a)
|
||||
|
||||
// push should initialise the stack and put one element on the stack
|
||||
assert.Equal(t, 1, testStack.Len())
|
||||
}
|
||||
|
||||
// Test passes if we can peek and modify an item
|
||||
// without modifying the value on the stack
|
||||
func TestStackPeekMutability(t *testing.T) {
|
||||
|
||||
testStack := New()
|
||||
|
||||
a, err := NewInt(big.NewInt(2))
|
||||
assert.Nil(t, err)
|
||||
b, err := NewInt(big.NewInt(3))
|
||||
assert.Nil(t, err)
|
||||
|
||||
testStack.Push(a).Push(b)
|
||||
|
||||
peekedItem := testPeekInteger(t, testStack, 0)
|
||||
assert.Equal(t, true, peekedItem.Equal(b))
|
||||
|
||||
// Check that by modifying the peeked value,
|
||||
// we did not modify the item on the stack
|
||||
peekedItem = a
|
||||
peekedItem.val = big.NewInt(0)
|
||||
|
||||
// Pop item from stack and check it is still the same
|
||||
poppedItem := testPopInteger(t, testStack)
|
||||
assert.Equal(t, true, poppedItem.Equal(b))
|
||||
}
|
||||
func TestStackPeek(t *testing.T) {
|
||||
|
||||
testStack := New()
|
||||
|
||||
values := []int64{23, 45, 67, 89, 12, 344}
|
||||
for _, val := range values {
|
||||
a := testMakeStackInt(t, val)
|
||||
testStack.Push(a)
|
||||
}
|
||||
|
||||
// i starts at 0, j starts at len(values)-1
|
||||
for i, j := 0, len(values)-1; j >= 0; i, j = i+1, j-1 {
|
||||
|
||||
peekedItem := testPeekInteger(t, testStack, uint16(i))
|
||||
a := testMakeStackInt(t, values[j])
|
||||
|
||||
fmt.Printf("%#v\n", peekedItem.val.Int64())
|
||||
|
||||
assert.Equal(t, true, a.Equal(peekedItem))
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestStackInsert(t *testing.T) {
|
||||
|
||||
testStack := New()
|
||||
|
||||
a := testMakeStackInt(t, 2)
|
||||
b := testMakeStackInt(t, 4)
|
||||
c := testMakeStackInt(t, 6)
|
||||
|
||||
// insert on an empty stack should put element on top
|
||||
_, err := testStack.Insert(0, a)
|
||||
assert.Equal(t, err, nil)
|
||||
_, err = testStack.Insert(0, b)
|
||||
assert.Equal(t, err, nil)
|
||||
_, err = testStack.Insert(1, c)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
// Order should be [a,c,b]
|
||||
pop1 := testPopInteger(t, testStack)
|
||||
pop2 := testPopInteger(t, testStack)
|
||||
pop3 := testPopInteger(t, testStack)
|
||||
|
||||
assert.Equal(t, true, pop1.Equal(b))
|
||||
assert.Equal(t, true, pop2.Equal(c))
|
||||
assert.Equal(t, true, pop3.Equal(a))
|
||||
|
||||
}
|
61
_pkg.dev/vm/stack/stackitem.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Item is an interface which represents an object that can be placed on the stack
|
||||
type Item interface {
|
||||
Integer() (*Int, error)
|
||||
Boolean() (*Boolean, error)
|
||||
ByteArray() (*ByteArray, error)
|
||||
Array() (*Array, error)
|
||||
Context() (*Context, error)
|
||||
Map() (*Map, error)
|
||||
Hash() (string, error)
|
||||
}
|
||||
|
||||
// abstractItem represents an `abstract` stack item
|
||||
// which will hold default values for stack items
|
||||
// it is intended to be embedded into types that will be used on the stack
|
||||
type abstractItem struct{}
|
||||
|
||||
// Integer is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) Integer() (*Int, error) {
|
||||
return nil, errors.New("This stack item is not an Integer")
|
||||
}
|
||||
|
||||
// Boolean is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) Boolean() (*Boolean, error) {
|
||||
return nil, errors.New("This stack item is not a Boolean")
|
||||
}
|
||||
|
||||
// ByteArray is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) ByteArray() (*ByteArray, error) {
|
||||
return nil, errors.New("This stack item is not a byte array")
|
||||
}
|
||||
|
||||
// Array is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) Array() (*Array, error) {
|
||||
return nil, errors.New("This stack item is not an array")
|
||||
}
|
||||
|
||||
// Context is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) Context() (*Context, error) {
|
||||
return nil, errors.New("This stack item is not of type context")
|
||||
}
|
||||
|
||||
// Map is the default implementation for a stackItem
|
||||
// Implements Item interface
|
||||
func (a *abstractItem) Map() (*Map, error) {
|
||||
return nil, errors.New("This stack item is not a map")
|
||||
}
|
||||
|
||||
func (a *abstractItem) Hash() (string, error) {
|
||||
return "", errors.New("This stack item need to override the Hash Method")
|
||||
}
|
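A minimal sketch of the embedding pattern described above (hypothetical type, not part of this changeset): a concrete item embeds abstractItem, inherits the error-returning defaults, and overrides only the methods it actually supports.

// hypothetical example, assumed to live alongside the stack package types
type stringItem struct {
	*abstractItem
	val string
}

// Hash overrides the abstractItem default, which would otherwise return an error.
func (s *stringItem) Hash() (string, error) {
	return s.val, nil
}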
68
_pkg.dev/vm/stack/stackitem_test.go
Normal file
|
@ -0,0 +1,68 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// A simple test to ensure that by embedding the abstractItem struct
|
||||
// we immediately become a stack item, with the default values set to nil
|
||||
func TestInterfaceEmbedding(t *testing.T) {
|
||||
|
||||
// Create an anonymous struct that embeds the abstractItem
|
||||
a := struct {
|
||||
*abstractItem
|
||||
}{
|
||||
&abstractItem{},
|
||||
}
|
||||
|
||||
// Since interface checking can be done at compile time.
|
||||
// If the abstractItem did not implement all methods of our interface `Item`
|
||||
// Then any struct which embeds it, will also not implement the Item interface.
|
||||
// This test would then give errors, at compile time.
|
||||
var Items []Item
|
||||
Items = append(Items, a)
|
||||
|
||||
// Default methods should give errors
|
||||
// Here we just need to test against one of the methods in the interface
|
||||
for _, element := range Items {
|
||||
x, err := element.Integer()
|
||||
assert.Nil(t, x)
|
||||
assert.NotNil(t, err, nil)
|
||||
}
|
||||
|
||||
}
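The compile-time guarantee this test relies on can also be made explicit with a blank-identifier assertion; a minimal sketch (assumed addition, not part of this changeset):

// Compile-time check that *abstractItem satisfies the Item interface.
// If a method were removed from abstractItem, this line would fail to compile.
var _ Item = (*abstractItem)(nil)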
|
||||
|
||||
// TestIntMethodOverride is a simple test to check that the Integer method is overridden
|
||||
// from the abstractItem
|
||||
func TestIntMethodOverride(t *testing.T) {
|
||||
|
||||
testValues := []int64{0, 10, 200, 30, 90}
|
||||
var Items []Item
|
||||
|
||||
// Convert a range of int64s into Stack Integers
|
||||
// Adding them into an array of StackItems
|
||||
for _, num := range testValues {
|
||||
stackInteger, err := NewInt(big.NewInt(num))
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
Items = append(Items, stackInteger)
|
||||
}
|
||||
|
||||
// For each item, call the Integer method on the interface
|
||||
// Which should return an integer and no error
|
||||
// as the stack integer struct overrides that method
|
||||
for i, element := range Items {
|
||||
k, err := element.Integer()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
if k.val.Cmp(big.NewInt(testValues[i])) != 0 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
61
_pkg.dev/vm/stack/test_helper.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// helper functions
|
||||
func testPeekInteger(t *testing.T, tStack *RandomAccess, n uint16) *Int {
|
||||
stackElement, err := tStack.Peek(n)
|
||||
assert.Nil(t, err)
|
||||
item, err := stackElement.Integer()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
func testPopInteger(t *testing.T, tStack *RandomAccess) *Int {
|
||||
stackElement, err := tStack.Pop()
|
||||
assert.Nil(t, err)
|
||||
item, err := stackElement.Integer()
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
func testMakeStackInt(t *testing.T, num int64) *Int {
|
||||
a, err := NewInt(big.NewInt(num))
|
||||
assert.Nil(t, err)
|
||||
return a
|
||||
}
|
||||
|
||||
func testReadInt64(t *testing.T, data []byte) int64 {
|
||||
var ret int64
|
||||
var arr [8]byte
|
||||
|
||||
// copy fills arr with up to 8 bytes of data; shorter inputs are zero-padded
|
||||
copy(arr[:], data)
|
||||
buf := bytes.NewBuffer(arr[:])
|
||||
err := binary.Read(buf, binary.LittleEndian, &ret)
|
||||
assert.Nil(t, err)
|
||||
return ret
|
||||
}
|
||||
|
||||
func testMakeStackMap(t *testing.T, m map[Item]Item) *Map {
|
||||
a, err := NewMap(m)
|
||||
assert.Nil(t, err)
|
||||
return a
|
||||
}
|
||||
|
||||
func testMakeArray(t *testing.T, v []Item) *Array {
|
||||
a, err := NewArray(v)
|
||||
assert.Nil(t, err)
|
||||
return a
|
||||
}
|
20
_pkg.dev/vm/state.go
Normal file
|
@ -0,0 +1,20 @@
|
|||
package vm
|
||||
|
||||
// Vmstate represents all possible states that the neo-vm can be in
|
||||
type Vmstate byte
|
||||
|
||||
// List of possible vm states
|
||||
const (
|
||||
// NONE is the running state of the vm
|
||||
// NONE signifies that the vm is ready to process an opcode
|
||||
NONE = 0
|
||||
// HALT is a stopped state of the vm
|
||||
// where the stop was signalled by the program completion
|
||||
HALT = 1 << 0
|
||||
// FAULT is a stopped state of the vm
|
||||
// where the stop was signalled by an error in the program
|
||||
FAULT = 1 << 1
|
||||
// BREAK is a suspended state for the VM
|
||||
// where the break was signalled by a breakpoint
|
||||
BREAK = 1 << 2
|
||||
)
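Because HALT, FAULT and BREAK are bit flags, a combined state can be tested with a bitwise AND; a hedged sketch (the helper is hypothetical, not part of this changeset):

// hasState reports whether the given flag is set in a combined Vmstate value.
// Only meaningful for the non-zero flags (HALT, FAULT, BREAK).
func hasState(s, flag Vmstate) bool {
	return s&flag != 0
}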
|
72
_pkg.dev/vm/vm.go
Normal file
|
@ -0,0 +1,72 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
)
|
||||
|
||||
// VM represents an instance of a Neo Virtual Machine
|
||||
type VM struct {
|
||||
// ResultStack contains the results of
|
||||
// the last evaluation stack before the program terminated
|
||||
ResultStack stack.RandomAccess
|
||||
// InvocationStack contains all of the contexts
|
||||
// loaded into the vm
|
||||
InvocationStack stack.Invocation
|
||||
state Vmstate
|
||||
}
|
||||
|
||||
// NewVM will:
|
||||
// Set the state of the VM to NONE
|
||||
// instantiate a script as a new context
|
||||
// Push the Context to the Invocation stack
|
||||
func NewVM(script []byte) *VM {
|
||||
ctx := stack.NewContext(script)
|
||||
v := &VM{
|
||||
state: NONE,
|
||||
}
|
||||
v.InvocationStack.Push(ctx)
|
||||
return v
|
||||
}
|
||||
|
||||
// Run loops over the current context by continuously stepping.
|
||||
// Run breaks once step returns an error or any state that is not NONE.
|
||||
func (v *VM) Run() (Vmstate, error) {
|
||||
for {
|
||||
state, err := v.step()
|
||||
if err != nil || state != NONE {
|
||||
return state, err
|
||||
}
|
||||
}
|
||||
}
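Taken together, NewVM and Run form the basic execution loop; a hypothetical usage sketch (assuming a script consisting of a single RET opcode, which should make the VM HALT immediately):

// build a one-instruction script and run it to completion
script := []byte{byte(stack.RET)}
vm := NewVM(script)
state, err := vm.Run()
// state should be HALT and err nil; any results are copied to vm.ResultStack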
|
||||
|
||||
// step will read `one` opcode from the script in the current context
|
||||
// Then execute that opcode
|
||||
func (v *VM) step() (Vmstate, error) {
|
||||
// Get Current Context
|
||||
ctx, err := v.InvocationStack.CurrentContext()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
// Read Opcode from context
|
||||
op, _ := ctx.Next() // The only error that can occur here is if the instruction pointer goes past the end of the script.
|
||||
// In the NEO-VM specs, this is ignored and we return the RET opcode
|
||||
// Execute OpCode
|
||||
state, err := v.executeOp(stack.Instruction(op), ctx)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// executeOp will execute one opcode on a given context.
|
||||
// If the opcode is not registered, then an unknown opcode error will be returned
|
||||
func (v *VM) executeOp(op stack.Instruction, ctx *stack.Context) (Vmstate, error) {
|
||||
//Find function which handles that specific opcode
|
||||
handleOp, ok := opFunc[op]
|
||||
if !ok {
|
||||
return FAULT, fmt.Errorf("unknown opcode entered %v", op)
|
||||
}
|
||||
return handleOp(op, ctx, &v.InvocationStack, &v.ResultStack)
|
||||
}
|
73
_pkg.dev/vm/vm_ops.go
Normal file
|
@ -0,0 +1,73 @@
|
|||
package vm
|
||||
|
||||
import "github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
|
||||
type stackInfo func(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error)
|
||||
|
||||
var opFunc = map[stack.Instruction]stackInfo{
|
||||
stack.TUCK: TUCK,
|
||||
stack.SWAP: SWAP,
|
||||
stack.ROT: ROT,
|
||||
stack.ROLL: ROLL,
|
||||
stack.PICK: PICK,
|
||||
stack.OVER: OVER,
|
||||
stack.NIP: NIP,
|
||||
stack.DUP: DUP,
|
||||
stack.DROP: DROP,
|
||||
stack.DEPTH: DEPTH,
|
||||
stack.XTUCK: XTUCK,
|
||||
stack.XSWAP: XSWAP,
|
||||
stack.XDROP: XDROP,
|
||||
stack.FROMALTSTACK: FROMALTSTACK,
|
||||
stack.TOALTSTACK: TOALTSTACK,
|
||||
stack.DUPFROMALTSTACK: DUPFROMALTSTACK,
|
||||
stack.JMPIFNOT: JMPIFNOT,
|
||||
stack.JMPIF: JMPIF,
|
||||
stack.JMP: JMP,
|
||||
stack.NOP: NOP,
|
||||
stack.HASH256: HASH256,
|
||||
stack.HASH160: HASH160,
|
||||
stack.SHA256: SHA256,
|
||||
stack.SHA1: SHA1,
|
||||
stack.XOR: Xor,
|
||||
stack.OR: Or,
|
||||
stack.AND: And,
|
||||
stack.INVERT: Invert,
|
||||
stack.MIN: Min,
|
||||
stack.MAX: Max,
|
||||
stack.WITHIN: Within,
|
||||
stack.NUMEQUAL: NumEqual,
|
||||
stack.NUMNOTEQUAL: NumNotEqual,
|
||||
stack.BOOLAND: BoolAnd,
|
||||
stack.BOOLOR: BoolOr,
|
||||
stack.LT: Lt,
|
||||
stack.LTE: Lte,
|
||||
stack.GT: Gt,
|
||||
stack.GTE: Gte,
|
||||
stack.SHR: Shr,
|
||||
stack.SHL: Shl,
|
||||
stack.INC: Inc,
|
||||
stack.DEC: Dec,
|
||||
stack.DIV: Div,
|
||||
stack.MOD: Mod,
|
||||
stack.NZ: Nz,
|
||||
stack.MUL: Mul,
|
||||
stack.ABS: Abs,
|
||||
stack.NOT: Not,
|
||||
stack.SIGN: Sign,
|
||||
stack.NEGATE: Negate,
|
||||
stack.ADD: Add,
|
||||
stack.SUB: Sub,
|
||||
stack.PUSHBYTES1: PushNBytes,
|
||||
stack.PUSHBYTES75: PushNBytes,
|
||||
stack.RET: RET,
|
||||
stack.EQUAL: EQUAL,
|
||||
stack.THROWIFNOT: THROWIFNOT,
|
||||
stack.THROW: THROW,
|
||||
}
|
||||
|
||||
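// init registers PushNBytes as the handler for every opcode in the contiguous
// PUSHBYTES1..PUSHBYTES75 range, so all 75 push instructions dispatch to the same function.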
func init() {
|
||||
for i := int(stack.PUSHBYTES1); i <= int(stack.PUSHBYTES75); i++ {
|
||||
opFunc[stack.Instruction(i)] = PushNBytes
|
||||
}
|
||||
}
|
102
_pkg.dev/vm/vm_ops_bitwise.go
Normal file
|
@ -0,0 +1,102 @@
|
|||
package vm
|
||||
|
||||
import "github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
|
||||
// Bitwise logic
|
||||
|
||||
// EQUAL pushes true to the stack
|
||||
// if the two top items on the stack are equal
|
||||
func EQUAL(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
itemA, itemB, err := popTwoByteArrays(ctx)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
ctx.Estack.Push(itemA.Equals(itemB))
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// Invert pops an integer x off of the stack and
|
||||
// pushes an integer on the stack whose value
|
||||
// is the bitwise complement of the value of x.
|
||||
// Returns an error if the popped value is not an integer or
|
||||
// if the bitwise complement cannot be taken.
|
||||
func Invert(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
i, err := ctx.Estack.PopInt()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
inv, err := i.Invert()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
ctx.Estack.Push(inv)
|
||||
|
||||
return NONE, nil
|
||||
}
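For reference, the bitwise complement of an arbitrary-precision integer satisfies ^x == -x-1, so inverting 5 yields -6, as the INVERT test elsewhere in this changeset assumes. A minimal sketch of the same relationship using math/big (shown only for illustration; it assumes Int.Invert wraps equivalent semantics):

x := big.NewInt(5)
inv := new(big.Int).Not(x) // inv is -6, i.e. -x-1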
|
||||
|
||||
// And pops two integers off of the stack and
|
||||
// pushes an integer onto the stack whose value
|
||||
// is the result of the application of the bitwise AND
|
||||
// operator to the two original integers' values.
|
||||
// Returns an error if either item cannot be cast to an integer.
|
||||
func And(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
operandA, operandB, err := popTwoIntegers(ctx)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
res, err := operandA.And(operandB)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
ctx.Estack.Push(res)
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// Or pops two integers off of the stack and
|
||||
// pushes an integer onto the stack whose value
|
||||
// is the result of the application of the bitwise OR
|
||||
// operator to the two original integers' values.
|
||||
// Returns an error if either item cannot be cast to an integer.
|
||||
func Or(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
operandA, operandB, err := popTwoIntegers(ctx)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
res, err := operandA.Or(operandB)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
ctx.Estack.Push(res)
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// Xor pops two integers off of the stack and
|
||||
// pushes an integer onto the stack whose value
|
||||
// is the result of the application of the bitwise XOR
|
||||
// operator to the two original integers' values.
|
||||
// Returns an error if either item cannot be cast to an integer.
|
||||
func Xor(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
operandA, operandB, err := popTwoIntegers(ctx)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
res, err := operandA.Xor(operandB)
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
ctx.Estack.Push(res)
|
||||
|
||||
return NONE, nil
|
||||
}
|
142
_pkg.dev/vm/vm_ops_bitwise_test.go
Normal file
|
@ -0,0 +1,142 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInvertOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
// 0000 0101 = 5
|
||||
a, err := stack.NewInt(big.NewInt(5))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// 1111 1010 = -6 (two's complement representation)
|
||||
_, err = v.executeOp(stack.INVERT, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopInt()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(-6), item.Value().Int64())
|
||||
}
|
||||
|
||||
func TestAndOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
// 110001 = 49
|
||||
a, err := stack.NewInt(big.NewInt(49))
|
||||
assert.Nil(t, err)
|
||||
|
||||
// 100011 = 35
|
||||
b, err := stack.NewInt(big.NewInt(35))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a).Push(b)
|
||||
|
||||
// 100001 = 33
|
||||
_, err = v.executeOp(stack.AND, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopInt()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(33), item.Value().Int64())
|
||||
}
|
||||
|
||||
func TestOrOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
// 110001 = 49
|
||||
a, err := stack.NewInt(big.NewInt(49))
|
||||
assert.Nil(t, err)
|
||||
|
||||
// 100011 = 35
|
||||
b, err := stack.NewInt(big.NewInt(35))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a).Push(b)
|
||||
|
||||
// 110011 = 51 (49 OR 35)
|
||||
_, err = v.executeOp(stack.OR, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopInt()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(51), item.Value().Int64())
|
||||
}
|
||||
|
||||
func TestXorOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
// 110001 = 49
|
||||
a, err := stack.NewInt(big.NewInt(49))
|
||||
assert.Nil(t, err)
|
||||
|
||||
// 100011 = 35
|
||||
b, err := stack.NewInt(big.NewInt(35))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a).Push(b)
|
||||
|
||||
// 010010 = 18 (49 XOR 35)
|
||||
_, err = v.executeOp(stack.XOR, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopInt()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(18), item.Value().Int64())
|
||||
}
|
||||
|
||||
func TestEqualOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a, err := stack.NewInt(big.NewInt(10))
|
||||
assert.Nil(t, err)
|
||||
|
||||
b, err := stack.NewInt(big.NewInt(10))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a).Push(b)
|
||||
|
||||
_, err = v.executeOp(stack.EQUAL, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopBoolean()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, true, item.Value())
|
||||
}
|
39
_pkg.dev/vm/vm_ops_exceptions.go
Normal file
|
@ -0,0 +1,39 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
)
|
||||
|
||||
// vm exceptions
|
||||
|
||||
// THROWIFNOT faults if the item on the top of the stack
|
||||
// does not evaluate to true
|
||||
// The specific logic for how a number or bytearray is evaluated can be seen
|
||||
// from the boolean conversion methods on the stack items
|
||||
func THROWIFNOT(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
|
||||
// Pop item from top of stack
|
||||
item, err := ctx.Estack.Pop()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
// Convert to a boolean
|
||||
ok, err := item.Boolean()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
// If false, throw
|
||||
if !ok.Value() {
|
||||
return FAULT, errors.New("item on top of stack evaluates to false")
|
||||
}
|
||||
return NONE, nil
|
||||
}
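A minimal usage sketch in the style of the op tests elsewhere in this changeset (hypothetical, not part of the diff): executing THROWIFNOT with a false boolean on top of the evaluation stack should fault.

v := VM{}
ctx := stack.NewContext([]byte{})
ctx.Estack.Push(stack.NewBoolean(false))

// the top of the stack evaluates to false, so THROWIFNOT should return FAULT
state, err := v.executeOp(stack.THROWIFNOT, ctx)
// expect state == FAULT and a non-nil err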
|
||||
|
||||
// THROW returns a FAULT VM state. This indicates that there is an error in the
|
||||
// program loaded in the current context.
|
||||
func THROW(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
return FAULT, errors.New("the execution of the script program ended with an error")
|
||||
}
|
96
_pkg.dev/vm/vm_ops_flow.go
Normal file
|
@ -0,0 +1,96 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
)
|
||||
|
||||
// Flow control
|
||||
|
||||
// RET Returns from the current context
|
||||
// Returns HALT if there are no more contexts to run
|
||||
func RET(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
_ = ctx // fix SA4009 warning
|
||||
|
||||
// Pop the current context from the Invocation stack
|
||||
ctx, err := istack.PopCurrentContext()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
// If this was the last context, then we copy over the evaluation stack to the resultstack
|
||||
// as the program is about to terminate once we remove this context
|
||||
if istack.Len() == 0 {
|
||||
|
||||
err = ctx.Estack.CopyTo(rstack)
|
||||
return HALT, err
|
||||
}
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// NOP Returns NONE VMState.
|
||||
func NOP(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// JMP moves the instruction pointer to an offset which is
|
||||
// calculated based on the instructionPointerOffset method.
|
||||
// Returns an error if the offset is out of range.
|
||||
func JMP(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
offset := instructionPointerOffset(ctx)
|
||||
if err := ctx.SetIP(offset); err != nil {
|
||||
return FAULT, err
|
||||
|
||||
}
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// JMPIF pops a boolean off of the stack and,
|
||||
// if the boolean's value is true, it
|
||||
// moves the instruction pointer to an offset which is
|
||||
// calculated based on the instructionPointerOffset method.
|
||||
// Returns an error if the offset is out of range or
|
||||
// the popped item is not a boolean.
|
||||
func JMPIF(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
b, err := ctx.Estack.PopBoolean()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
if b.Value() {
|
||||
offset := instructionPointerOffset(ctx)
|
||||
if err := ctx.SetIP(offset); err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
// JMPIFNOT pops a boolean off of the stack and,
|
||||
// if the boolean's value is false, it
|
||||
// moves the instruction pointer to an offset which is
|
||||
// calculated based on the instructionPointerOffset method.
|
||||
// Returns an error if the offset is out of range or
|
||||
// the popped item is not a boolean.
|
||||
func JMPIFNOT(op stack.Instruction, ctx *stack.Context, istack *stack.Invocation, rstack *stack.RandomAccess) (Vmstate, error) {
|
||||
b, err := ctx.Estack.PopBoolean()
|
||||
if err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
if !b.Value() {
|
||||
offset := instructionPointerOffset(ctx)
|
||||
if err := ctx.SetIP(offset); err != nil {
|
||||
return FAULT, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return NONE, nil
|
||||
}
|
||||
|
||||
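// instructionPointerOffset derives the absolute jump target from the signed
// 16-bit operand that follows a JMP-family opcode. The operand appears to be
// encoded relative to the start of the instruction (one opcode byte plus two
// operand bytes), which is why 3 is subtracted.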
func instructionPointerOffset(ctx *stack.Context) int {
|
||||
return ctx.IP() + int(ctx.ReadInt16()) - 3
|
||||
}
|
174
_pkg.dev/vm/vm_ops_flow_test.go
Normal file
|
@ -0,0 +1,174 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/CityOfZion/neo-go/pkg/vm/stack"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNopOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a, err := stack.NewInt(big.NewInt(10))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
_, err = v.executeOp(stack.NOP, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
item, err := ctx.Estack.PopInt()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(10), item.Value().Int64())
|
||||
}
|
||||
|
||||
func TestJmpOp(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a, err := stack.NewInt(big.NewInt(10))
|
||||
assert.Nil(t, err)
|
||||
|
||||
ctx := stack.NewContext([]byte{5, 0, 2, 3, 4})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// ctx.ip = -1
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
|
||||
// ctx.ip will be set to offset.
|
||||
// offset = ctx.IP() + int(ctx.ReadInt16()) - 3
|
||||
// = 0 + 5 -3 = 2
|
||||
_, err = v.executeOp(stack.JMP, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have one item
|
||||
assert.Equal(t, 1, ctx.Estack.Len())
|
||||
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 3, ctx.IP())
|
||||
}
|
||||
|
||||
// test JMPIF instruction with true boolean
|
||||
// on top of the stack
|
||||
func TestJmpIfOp1(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a := stack.NewBoolean(true)
|
||||
|
||||
ctx := stack.NewContext([]byte{5, 0, 2, 3, 4})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// ctx.ip = -1
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
|
||||
// ctx.ip will be set to offset
|
||||
// because there is a true boolean
|
||||
// on top of the stack.
|
||||
// offset = ctx.IP() + int(ctx.ReadInt16()) - 3
|
||||
// = 0 + 5 -3 = 2
|
||||
_, err := v.executeOp(stack.JMPIF, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have 0 items
|
||||
assert.Equal(t, 0, ctx.Estack.Len())
|
||||
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 3, ctx.IP())
|
||||
}
|
||||
|
||||
// test JMPIF instruction with false boolean
|
||||
// on top of the stack
|
||||
func TestJmpIfOp2(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a := stack.NewBoolean(false)
|
||||
|
||||
ctx := stack.NewContext([]byte{5, 0, 2, 3, 4})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// ctx.ip = -1
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
|
||||
// nothing will happen because
|
||||
// the value of the boolean on top of the stack
|
||||
// is false
|
||||
_, err := v.executeOp(stack.JMPIF, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have 0 items
|
||||
assert.Equal(t, 0, ctx.Estack.Len())
|
||||
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
}
|
||||
|
||||
// test JMPIFNOT instruction with true boolean
|
||||
// on top of the stack
|
||||
func TestJmpIfNotOp1(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a := stack.NewBoolean(true)
|
||||
|
||||
ctx := stack.NewContext([]byte{5, 0, 2, 3, 4})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// ctx.ip = -1
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
|
||||
// nothing will happen because
|
||||
// the value of the boolean on top of the stack
|
||||
// is true
|
||||
_, err := v.executeOp(stack.JMPIFNOT, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have 0 items
|
||||
assert.Equal(t, 0, ctx.Estack.Len())
|
||||
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
}
|
||||
|
||||
// test JMPIFNOT instruction with false boolean
|
||||
// on top of the stack
|
||||
func TestJmpIfNotOp2(t *testing.T) {
|
||||
|
||||
v := VM{}
|
||||
|
||||
a := stack.NewBoolean(false)
|
||||
|
||||
ctx := stack.NewContext([]byte{5, 0, 2, 3, 4})
|
||||
ctx.Estack.Push(a)
|
||||
|
||||
// ctx.ip = -1
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 0, ctx.IP())
|
||||
|
||||
// ctx.ip will be set to offset
|
||||
// because there is a false boolean
|
||||
// on top of the stack.
|
||||
// offset = ctx.IP() + int(ctx.ReadInt16()) - 3
|
||||
// = 0 + 5 -3 = 2
|
||||
_, err := v.executeOp(stack.JMPIFNOT, ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Stack should have 0 items
|
||||
assert.Equal(t, 0, ctx.Estack.Len())
|
||||
|
||||
// ctx.IP() = ctx.ip + 1
|
||||
assert.Equal(t, 3, ctx.IP())
|
||||
}
|
Some files were not shown because too many files have changed in this diff.