package network

import (
	"errors"
	"fmt"
	"math/rand"
	"net"
	"sync"
	"time"

	"github.com/CityOfZion/neo-go/pkg/core"
	"github.com/CityOfZion/neo-go/pkg/core/transaction"
	"github.com/CityOfZion/neo-go/pkg/network/payload"
	"github.com/CityOfZion/neo-go/pkg/util"
	log "github.com/sirupsen/logrus"
)

const (
	// Peer numbers are arbitrary at the moment.
	minPeers       = 5   // request more peers from discovery below this count
	maxPeers       = 20  // target number of connected peers
	maxBlockBatch  = 200 // maximum number of blocks requested in one batch
	maxAddrsToSend = 200 // maximum number of addresses sent in one addr message
	minPoolCount   = 30  // request more addresses when the discovery pool drops below this
)

var (
	errIdenticalID      = errors.New("identical node id")
	errInvalidHandshake = errors.New("invalid handshake")
	errInvalidNetwork   = errors.New("invalid network")
	errServerShutdown   = errors.New("server shutdown")
	errInvalidInvType   = errors.New("invalid inventory type")
)

type (
	// Server represents the local Node in the network. Its transport could
	// be of any kind.
	Server struct {
		// ServerConfig holds the Server configuration.
		ServerConfig

		// id is also known as the nonce of the server.
		id uint32

		transport Transporter
		discovery Discoverer
		chain     core.Blockchainer
		bQueue    *blockQueue

		lock  sync.RWMutex
		peers map[Peer]bool

		addrReq    chan *Message
		register   chan Peer
		unregister chan peerDrop
		quit       chan struct{}
	}

	peerDrop struct {
		peer   Peer
		reason error
	}
)

// NewServer returns a new Server, initialized with the given configuration.
func NewServer(config ServerConfig, chain core.Blockchainer) *Server {
	s := &Server{
		ServerConfig: config,
		chain:        chain,
		bQueue:       newBlockQueue(maxBlockBatch, chain),
		id:           rand.Uint32(),
		quit:         make(chan struct{}),
		addrReq:      make(chan *Message, minPeers),
		register:     make(chan Peer),
		unregister:   make(chan peerDrop),
		peers:        make(map[Peer]bool),
	}

	s.transport = NewTCPTransport(s, fmt.Sprintf(":%d", config.ListenTCP))
	s.discovery = NewDefaultDiscovery(
		s.DialTimeout,
		s.transport,
	)

	return s
}
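
// Minimal wiring sketch (not part of this file; NewServerConfig and the
// core.NewBlockchain arguments are assumptions, adapt them to the node's
// actual configuration and storage setup):
//
//	chain, err := core.NewBlockchain(store, cfg.ProtocolConfiguration)
//	if err != nil {
//		// handle the error
//	}
//	serv := NewServer(NewServerConfig(cfg), chain)
//	errChan := make(chan error)
//	go serv.Start(errChan) // Start blocks in the server's run loop
//	// ... later, on exit:
//	serv.Shutdown()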

// ID returns the server's ID.
func (s *Server) ID() uint32 {
	return s.id
}

// Start will start the server and its underlying transport.
func (s *Server) Start(errChan chan error) {
	log.WithFields(log.Fields{
		"blockHeight":  s.chain.BlockHeight(),
		"headerHeight": s.chain.HeaderHeight(),
	}).Info("node started")

	s.discovery.BackFill(s.Seeds...)

	go s.bQueue.run()
	go s.transport.Accept()
	s.run()
}

// Shutdown disconnects all peers and stops listening.
func (s *Server) Shutdown() {
	log.WithFields(log.Fields{
		"peers": s.PeerCount(),
	}).Info("shutting down server")
	s.bQueue.discard()
	close(s.quit)
}

// UnconnectedPeers returns a list of peers that are in the discovery peer list
// but are not connected to the server.
func (s *Server) UnconnectedPeers() []string {
	return []string{}
}

// BadPeers returns a list of peers that are flagged as "bad" peers.
func (s *Server) BadPeers() []string {
	return []string{}
}

// run is the main loop of the server. It keeps the peer count and the
// discovery pool topped up and processes peer registration and
// unregistration events until the server is shut down.
func (s *Server) run() {
	for {
		c := s.PeerCount()
		if c < minPeers {
			s.discovery.RequestRemote(maxPeers - c)
		}
		if s.discovery.PoolCount() < minPoolCount {
			select {
			case s.addrReq <- NewMessage(s.Net, CMDGetAddr, payload.NewNullPayload()):
				// sent request
			default:
				// we have one in the queue already that is
				// going to be served by some worker when it's ready
			}
		}
		select {
		case <-s.quit:
			s.transport.Close()
			for p := range s.peers {
				p.Disconnect(errServerShutdown)
			}
			return
		case p := <-s.register:
			// When a new peer is connected we send out our version immediately.
			if err := s.sendVersion(p); err != nil {
				log.WithFields(log.Fields{
					"addr": p.NetAddr(),
				}).Error(err)
			}
			s.peers[p] = true
			log.WithFields(log.Fields{
				"addr": p.NetAddr(),
			}).Info("new peer connected")
		case drop := <-s.unregister:
			if s.peers[drop.peer] {
				delete(s.peers, drop.peer)
				log.WithFields(log.Fields{
					"addr":      drop.peer.NetAddr(),
					"reason":    drop.reason,
					"peerCount": s.PeerCount(),
				}).Warn("peer disconnected")
				addr := drop.peer.NetAddr().String()
				s.discovery.UnregisterConnectedAddr(addr)
				s.discovery.BackFill(addr)
			}
			// else the peer is already gone, which can happen
			// because we have two goroutines sending signals here
		}
	}
}

// Peers returns the current list of peers connected to
// the server.
func (s *Server) Peers() map[Peer]bool {
	return s.peers
}

// PeerCount returns the number of currently connected peers.
func (s *Server) PeerCount() int {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return len(s.peers)
}

// startProtocol starts a long running background loop that interacts
// every ProtoTickInterval with the peer.
func (s *Server) startProtocol(p Peer) {
	log.WithFields(log.Fields{
		"addr":        p.NetAddr(),
		"userAgent":   string(p.Version().UserAgent),
		"startHeight": p.Version().StartHeight,
		"id":          p.Version().Nonce,
	}).Info("started protocol")

	s.discovery.RegisterGoodAddr(p.NetAddr().String())
	err := s.requestHeaders(p)
	if err != nil {
		p.Disconnect(err)
		return
	}

	timer := time.NewTimer(s.ProtoTickInterval)
	for {
		select {
		case err = <-p.Done():
			// time to stop
		case m := <-s.addrReq:
			err = p.WriteMsg(m)
		case <-timer.C:
			// Try to sync in headers and blocks with the peer if its block height is higher than ours.
			if p.Version().StartHeight > s.chain.BlockHeight() {
				err = s.requestBlocks(p)
			}
			if err == nil {
				timer.Reset(s.ProtoTickInterval)
			}
		}
		if err != nil {
			s.unregister <- peerDrop{p, err}
			timer.Stop()
			p.Disconnect(err)
			return
		}
	}
}

// When a peer connects to the server, we will send our version immediately.
func (s *Server) sendVersion(p Peer) error {
	payload := payload.NewVersion(
		s.id,
		s.ListenTCP,
		s.UserAgent,
		s.chain.BlockHeight(),
		s.Relay,
	)
	return p.SendVersion(NewMessage(s.Net, CMDVersion, payload))
}

// When a peer sends out its version we reply with verack after validating
// the version.
func (s *Server) handleVersionCmd(p Peer, version *payload.Version) error {
	err := p.HandleVersion(version)
	if err != nil {
		return err
	}
	if s.id == version.Nonce {
		return errIdenticalID
	}
	if p.NetAddr().Port != int(version.Port) {
		return fmt.Errorf("port mismatch: connected to %d and peer sends %d", p.NetAddr().Port, version.Port)
	}
	return p.SendVersionAck(NewMessage(s.Net, CMDVerack, nil))
}
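
// The resulting handshake, as implemented in sendVersion, handleVersionCmd and
// handleMessage, roughly looks like this for one side of the connection
// (a sketch; both sides run the same logic):
//
//	local                          remote
//	  |--- version (sendVersion) --->|
//	  |<-- version ------------------|  handleVersionCmd validates nonce/port
//	  |--- verack (SendVersionAck) ->|
//	  |<-- verack -------------------|  handleMessage: CMDVerack -> startProtocol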

// handleHeadersCmd will process the headers it received from its peer.
// If the header height of the blockchain is still smaller than that of the
// peer, the server will request more headers.
// This method could best be called in a separate routine.
func (s *Server) handleHeadersCmd(p Peer, headers *payload.Headers) {
	if err := s.chain.AddHeaders(headers.Hdrs...); err != nil {
		log.Warnf("failed processing headers: %s", err)
		return
	}
	// The peer will respond with a maximum of 2000 headers in one batch.
	// We will ask for one more batch here if needed. Eventually we will get
	// synced due to the startProtocol routine that asks for headers every
	// protoTick.
	if s.chain.HeaderHeight() < p.Version().StartHeight {
		s.requestHeaders(p)
	}
}

// handleBlockCmd processes the block received from its peer.
func (s *Server) handleBlockCmd(p Peer, block *core.Block) error {
	return s.bQueue.putBlock(block)
}

// handleInvCmd will process the received inventory.
func (s *Server) handleInvCmd(p Peer, inv *payload.Inventory) error {
	if !inv.Type.Valid() || len(inv.Hashes) == 0 {
		return errInvalidInvType
	}
	payload := payload.NewInventory(inv.Type, inv.Hashes)
	return p.WriteMsg(NewMessage(s.Net, CMDGetData, payload))
}

// handleAddrCmd will process received addresses.
func (s *Server) handleAddrCmd(p Peer, addrs *payload.AddressList) error {
	for _, a := range addrs.Addrs {
		s.discovery.BackFill(a.IPPortString())
	}
	return nil
}

// handleGetAddrCmd sends to the peer some good addresses that we know of.
func (s *Server) handleGetAddrCmd(p Peer) error {
	addrs := s.discovery.GoodPeers()
	if len(addrs) > maxAddrsToSend {
		addrs = addrs[:maxAddrsToSend]
	}
	alist := payload.NewAddressList(len(addrs))
	ts := time.Now()
	for i, addr := range addrs {
		// we know it's a good address, so it can't fail
		netaddr, _ := net.ResolveTCPAddr("tcp", addr)
		alist.Addrs[i] = payload.NewAddressAndTime(netaddr, ts)
	}
	return p.WriteMsg(NewMessage(s.Net, CMDAddr, alist))
}

// requestHeaders will send a getheaders message to the peer.
// The peer will respond with headers up to a count of 2000.
func (s *Server) requestHeaders(p Peer) error {
	start := []util.Uint256{s.chain.CurrentHeaderHash()}
	payload := payload.NewGetBlocks(start, util.Uint256{})
	return p.WriteMsg(NewMessage(s.Net, CMDGetHeaders, payload))
}
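
// Note: the zero util.Uint256{} stop hash asks the peer for as many headers
// as it will return after the start hash (capped at 2000 in the reference
// implementation). This reading is based on the NEO protocol semantics rather
// than on code in this file.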

// requestBlocks will send a getdata message to the peer
// to sync up in blocks. A maximum of maxBlockBatch will
// be sent at once.
func (s *Server) requestBlocks(p Peer) error {
	var (
		hashes       []util.Uint256
		hashStart    = s.chain.BlockHeight() + 1
		headerHeight = s.chain.HeaderHeight()
	)
	for hashStart <= headerHeight && len(hashes) < maxBlockBatch {
		hash := s.chain.GetHeaderHash(int(hashStart))
		hashes = append(hashes, hash)
		hashStart++
	}
	if len(hashes) > 0 {
		payload := payload.NewInventory(payload.BlockType, hashes)
		return p.WriteMsg(NewMessage(s.Net, CMDGetData, payload))
	} else if s.chain.HeaderHeight() < p.Version().StartHeight {
		return s.requestHeaders(p)
	}
	return nil
}
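
// For example (numbers are illustrative only): with BlockHeight() == 1000 and
// HeaderHeight() == 5000, the loop above collects the header hashes of blocks
// 1001..1200 (maxBlockBatch == 200) and requests them in a single getdata
// message; the following batch is requested on the next protocol tick.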

// handleMessage will process the given message.
func (s *Server) handleMessage(peer Peer, msg *Message) error {
	// Make sure both server and peer are operating on
	// the same network.
	if msg.Magic != s.Net {
		return errInvalidNetwork
	}

	if peer.Handshaked() {
		switch msg.CommandType() {
		case CMDAddr:
			addrs := msg.Payload.(*payload.AddressList)
			return s.handleAddrCmd(peer, addrs)
		case CMDGetAddr:
			// it has no payload
			return s.handleGetAddrCmd(peer)
		case CMDHeaders:
			headers := msg.Payload.(*payload.Headers)
			go s.handleHeadersCmd(peer, headers)
		case CMDInv:
			inventory := msg.Payload.(*payload.Inventory)
			return s.handleInvCmd(peer, inventory)
		case CMDBlock:
			block := msg.Payload.(*core.Block)
			return s.handleBlockCmd(peer, block)
		case CMDVersion, CMDVerack:
			return fmt.Errorf("received '%s' after the handshake", msg.CommandType())
		}
	} else {
		switch msg.CommandType() {
		case CMDVersion:
			version := msg.Payload.(*payload.Version)
			return s.handleVersionCmd(peer, version)
		case CMDVerack:
			err := peer.HandleVersionAck()
			if err != nil {
				return err
			}
			go s.startProtocol(peer)
		default:
			return fmt.Errorf("received '%s' during handshake", msg.CommandType())
		}
	}
	return nil
}

// RelayTxn relays a new transaction to the local node and the connected peers.
// Reference: the method OnRelay in C#: https://github.com/neo-project/neo/blob/master/neo/Network/P2P/LocalNode.cs#L159
func (s *Server) RelayTxn(t *transaction.Transaction) RelayReason {
	if t.Type == transaction.MinerType {
		return RelayInvalid
	}
	if s.chain.HasTransaction(t.Hash()) {
		return RelayAlreadyExists
	}
	if err := s.chain.VerifyTx(t, nil); err != nil {
		return RelayInvalid
	}
	// TODO: Implement Plugin.CheckPolicy?
	//if (!Plugin.CheckPolicy(transaction))
	//	return RelayResultReason.PolicyFail;
	if ok := s.chain.GetMemPool().TryAdd(t.Hash(), core.NewPoolItem(t, s.chain)); !ok {
		return RelayOutOfMemory
	}

	for p := range s.Peers() {
		payload := payload.NewInventory(payload.TXType, []util.Uint256{t.Hash()})
		s.RelayDirectly(p, payload)
	}

	return RelaySucceed
}
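
// A caller (for instance the sendrawtransaction RPC handler) is expected to
// map the returned RelayReason to its own result; a sketch of hypothetical
// caller code, not part of this file:
//
//	switch serv.RelayTxn(tx) {
//	case RelaySucceed:
//		// report success to the client
//	case RelayAlreadyExists, RelayOutOfMemory, RelayInvalid:
//		// report the corresponding failure
//	}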

// RelayDirectly relays the inventory directly to the remote peer.
// Reference: the method OnRelayDirectly in C#: https://github.com/neo-project/neo/blob/master/neo/Network/P2P/LocalNode.cs#L166
func (s *Server) RelayDirectly(p Peer, inv *payload.Inventory) {
	if !p.Version().Relay {
		return
	}

	p.WriteMsg(NewMessage(s.Net, CMDInv, inv))
}

func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}