Remove extraneous packages

This commit is contained in:
BlockChainDev 2019-02-25 22:44:56 +00:00
parent cda7a31e4e
commit 7eafd1ac17
25 changed files with 0 additions and 2275 deletions

View file

@ -1 +0,0 @@
FROM vidsyhq/go-base:latest

View file

@ -1,69 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
branch = "master"
name = "github.com/syndtr/goleveldb"
[[constraint]]
name = "github.com/urfave/cli"
version = "1.20.0"
[prune]
go-tests = true
unused-packages = true
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "github.com/anthdm/rfc6979"
[[constraint]]
name = "golang.org/x/text"
version = "0.3.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
name = "github.com/sirupsen/logrus"
version = "1.0.5"
[[constraint]]
name = "github.com/pkg/errors"
version = "0.8.0"
[[constraint]]
name = "github.com/go-yaml/yaml"
version = "2.1.1"
[[constraint]]
name = "github.com/go-redis/redis"
version = "6.10.2"

View file

@ -1,57 +0,0 @@
BRANCH = "master"
BUILD_TIME = "$(shell date -u +\"%Y-%m-%dT%H:%M:%SZ\")"
VERSION = $(shell cat ./VERSION)
REPONAME = "neo-go"
NETMODE ?= "privnet"
build:
@echo "=> Building darwin binary"
@go build -i -ldflags "-X github.com/CityOfZion/neo-go/config.Version=${VERSION}-dev -X github.com/CityOfZion/neo-go/config.BuildTime=${BUILD_TIME}" -o ./bin/neo-go ./cli/main.go
build-image:
docker build -t cityofzion/neo-go --build-arg VERSION=${VERSION} .
build-linux:
@echo "=> Building linux binary"
@GOOS=linux go build -i -ldflags "-X github.com/CityOfZion/neo-go/config.Version=${VERSION}-dev -X github.com/CityOfZion/neo-go/config.BuildTime=${BUILD_TIME}" -o ./bin/neo-go ./cli/main.go
check-version:
git fetch && (! git rev-list ${VERSION})
clean-cluster:
@echo "=> Removing all containers and chain storage"
@rm -rf chains/privnet-docker-one chains/privnet-docker-two chains/privnet-docker-three chains/privnet-docker-four
@docker-compose stop
@docker-compose rm -f
deps:
@dep ensure
push-tag:
git checkout ${BRANCH}
git pull origin ${BRANCH}
git tag ${VERSION}
git push origin ${VERSION}
push-to-registry:
@docker login -e ${DOCKER_EMAIL} -u ${DOCKER_USER} -p ${DOCKER_PASS}
@docker tag CityOfZion/${REPONAME}:latest CityOfZion/${REPONAME}:${CIRCLE_TAG}
@docker push CityOfZion/${REPONAME}:${CIRCLE_TAG}
@docker push CityOfZion/${REPONAME}
run: build
./bin/neo-go node -config-path ./config -${NETMODE}
run-cluster: build-linux
@echo "=> Starting docker-compose cluster"
@echo "=> Building container image"
@docker-compose build
@docker-compose up -d
@echo "=> Tailing logs, exiting this prompt will not stop the cluster"
@docker-compose logs -f
test:
@go test ./... -cover
vet:
@go vet ./...

View file

@ -1 +0,0 @@
0.44.8

View file

@ -1,152 +0,0 @@
version: 2
jobs:
install_deps:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: vidsyhq/go-builder:latest
environment:
BUILD: false
steps:
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- run: /scripts/build.sh
- save_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
paths:
- vendor
- /go/pkg
build_image:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: alpine
steps:
- run: apk update && apk add git make curl tar
- checkout
- restore_cache:
keys:
- dependency-cache-{{ .Revision }}
- restore_cache:
keys:
- dependency-cache-cli-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- setup_remote_docker
- run:
name: Install Docker client
command: |
set -x
VER="17.03.0-ce"
curl -L -o /tmp/docker-$VER.tgz https://get.docker.com/builds/Linux/x86_64/docker-$VER.tgz
tar -xz -C /tmp -f /tmp/docker-$VER.tgz
mv /tmp/docker/* /usr/bin
- run: make build-image
test:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: vidsyhq/go-builder:latest
steps:
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- run: make test
vet:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: vidsyhq/go-builder:latest
steps:
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- run: make vet
check_version:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: vidsyhq/go-builder:latest
steps:
- checkout
- run: make check-version
build_cli:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: vidsyhq/go-builder:latest
steps:
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- run: make build
- save_cache:
key: dependency-cache-cli-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
paths:
- bin/neo-go
deploy:
working_directory: /go/src/github.com/CityOfZion/neo-go
docker:
- image: alpine
steps:
- run: apk update && apk add git make curl tar
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- restore_cache:
key: dependency-cache-cli-{{ checksum "Gopkg.toml" }}-{{ checksum "VERSION" }}
- setup_remote_docker
- run:
name: Install Docker client
command: |
set -x
VER="17.03.0-ce"
curl -L -o /tmp/docker-$VER.tgz https://get.docker.com/builds/Linux/x86_64/docker-$VER.tgz
tar -xz -C /tmp -f /tmp/docker-$VER.tgz
mv /tmp/docker/* /usr/bin
- run: make build-image
- deploy:
name: deploy
command: make push-to-registry
workflows:
version: 2
workflow:
jobs:
- install_deps:
filters:
tags:
only: /[0-9]+\.[0-9]+\.[0-9]+/
- test:
requires:
- install_deps
filters:
tags:
only: /[0-9]+\.[0-9]+\.[0-9]+/
- vet:
requires:
- install_deps
filters:
tags:
only: /[0-9]+\.[0-9]+\.[0-9]+/
- check_version:
filters:
branches:
ignore: master
- build_cli:
requires:
- install_deps
filters:
tags:
only: /[0-9]+\.[0-9]+\.[0-9]+/
- build_image:
requires:
- install_deps
- build_cli
filters:
tags:
only: /[0-9]+\.[0-9]+\.[0-9]+/
- deploy:
requires:
- build_image
- test
- vet
- check_version
filters:
tags:
only:
- /[0-9]+\.[0-9]+\.[0-9]+/
branches:
ignore: /.*/

View file

@ -1,285 +0,0 @@
package addrmgr
import (
"errors"
"fmt"
"math/rand"
"sync"
"time"
"github.com/CityOfZion/neo-go/pkg/peer"
"github.com/CityOfZion/neo-go/pkg/wire/payload"
)
// Tuning parameters for the address manager.
const (
	maxTries = 5 // Try to connect five times; if we cannot then we say it is bad.

	// We will keep bad addresses, so that we do not attempt to connect to them in the future.
	// Nodes at the moment seem to send a large percentage of bad nodes.

	maxFailures = 12 // This will be incremented when we connect to a node and for whatever reason, they keep disconnecting.
	// The number is high because there could be a period of time where a node is behaving unresponsively and
	// we do not want to immediately put it inside of the bad bucket.

	// maxAllowedAddrs is the maximum amount of addresses that the addrmgr will hold.
	maxAllowedAddrs = 2000
)
// addrStats tracks the connection history of a single known address.
type addrStats struct {
	tries       uint8     // total connection attempts
	failures    uint8     // attempts that failed
	permanent   bool      // permanent = inbound, temp = outbound
	lastTried   time.Time // when a connection was last attempted
	lastSuccess time.Time // when a connection last completed successfully
}
// Addrmgr manages peer addresses, bucketed by quality.
type Addrmgr struct {
	addrmtx   sync.RWMutex                    // guards all four maps below
	goodAddrs map[*payload.Net_addr]addrStats // successfully handshaken with
	newAddrs  map[*payload.Net_addr]struct{}  // known but never connected to
	badAddrs  map[*payload.Net_addr]addrStats // repeatedly failed
	knownList map[string]*payload.Net_addr    // "ip:port" -> addr; contains all of the addresses in badAddrs, newAddrs and goodAddrs
}
// New returns an empty, ready-to-use address manager.
func New() *Addrmgr {
	return &Addrmgr{
		addrmtx:   sync.RWMutex{},
		goodAddrs: make(map[*payload.Net_addr]addrStats, 100),
		newAddrs:  make(map[*payload.Net_addr]struct{}, 100),
		badAddrs:  make(map[*payload.Net_addr]addrStats, 100),
		knownList: make(map[string]*payload.Net_addr, 100),
	}
}
// AddAddrs adds previously-unknown addresses into the new-address list.
// Duplicates within the batch and addresses already known to the manager
// are skipped. This is safe for concurrent access.
func (a *Addrmgr) AddAddrs(newAddrs []*payload.Net_addr) {
	newAddrs = removeDuplicates(newAddrs)

	// Filter and insert under one critical section. The original released
	// the lock between the uniqueness check and the insertion loop, so a
	// concurrent AddAddrs could register the same address twice.
	a.addrmtx.Lock()
	defer a.addrmtx.Unlock()
	for _, addr := range newAddrs {
		if _, ok := a.knownList[addr.IPPort()]; ok {
			continue // already known
		}
		a.newAddrs[addr] = struct{}{}
		a.knownList[addr.IPPort()] = addr
	}
}
// Good returns the good addresses.
// A good address is:
//   - Known
//   - has been successfully connected to in the last week
//
// Note: while users are launching full nodes from their laptops, the one
// week marker will need to be modified.
func (a *Addrmgr) Good() []payload.Net_addr {
	var goodAddrs []payload.Net_addr
	var oneWeekAgo = time.Now().Add(((time.Hour * 24) * 7) * -1)
	a.addrmtx.RLock()
	// TODO: sort addresses, permanent ones go first
	for addr, stat := range a.goodAddrs {
		// NOTE(review): this filters on lastTried rather than lastSuccess,
		// so an address whose recent attempts all failed still counts as
		// good — confirm that is intended.
		if stat.lastTried.Before(oneWeekAgo) {
			continue
		}
		goodAddrs = append(goodAddrs, *addr)
	}
	a.addrmtx.RUnlock()
	return goodAddrs
}
// Unconnected returns a snapshot of the new-address list: addresses we
// know about but have not yet connected to.
func (a *Addrmgr) Unconnected() []payload.Net_addr {
	a.addrmtx.RLock()
	defer a.addrmtx.RUnlock()
	var out []payload.Net_addr
	for addr := range a.newAddrs {
		out = append(out, *addr)
	}
	return out
}
// Bad returns a snapshot of the bad-address list: addresses that we have
// tried to connect to in the past and failed.
func (a *Addrmgr) Bad() []payload.Net_addr {
	a.addrmtx.RLock()
	defer a.addrmtx.RUnlock()
	var out []payload.Net_addr
	for addr := range a.badAddrs {
		out = append(out, *addr)
	}
	return out
}
// FetchMoreAddresses reports whether the manager wants more addresses:
// true while the number of known addresses is below maxAllowedAddrs (2000).
// (An earlier comment said 100; the code compares against maxAllowedAddrs.)
func (a *Addrmgr) FetchMoreAddresses() bool {
	var numOfKnownAddrs int
	a.addrmtx.RLock()
	numOfKnownAddrs = len(a.knownList)
	a.addrmtx.RUnlock()
	return numOfKnownAddrs < maxAllowedAddrs
}
// ConnectionComplete is called by the server once the wire handshake is
// done — not merely when the TCP connection succeeds. The address is
// promoted into the good bucket and removed from new/bad. The address
// should already be known to the manager because it was handed to the
// Connmgr. Safe for concurrent access.
func (a *Addrmgr) ConnectionComplete(addressport string, inbound bool) {
	a.addrmtx.Lock()
	defer a.addrmtx.Unlock()
	// if addrmgr does not know this address, then we just return
	if _, ok := a.knownList[addressport]; !ok {
		fmt.Println("Connected to an unknown address:port ", addressport)
		return
	}
	na := a.knownList[addressport]
	// Move it from the newAddrs list to the goodAddrs list.
	// Note: reading a.goodAddrs[na] yields the zero addrStats if absent.
	stats := a.goodAddrs[na]
	stats.lastSuccess = time.Now()
	stats.lastTried = time.Now()
	stats.permanent = inbound
	stats.tries++
	a.goodAddrs[na] = stats
	// Remove it from new and bad, if it was there.
	delete(a.newAddrs, na)
	delete(a.badAddrs, na)
	// Unfortunately, we will have a lot of bad nodes because of people
	// connecting to nodes from their laptop. TODO: the sort function in
	// Good() will mitigate.
}
// Failed is called by the ConnMgr to report that connecting to (or staying
// connected to) an address has failed. Counters are updated and the address
// may be demoted to — or dropped from — the bad list. Safe for concurrent
// access.
func (a *Addrmgr) Failed(addressport string) {
	a.addrmtx.Lock()
	defer a.addrmtx.Unlock()
	// Unknown address: nothing to record.
	if _, ok := a.knownList[addressport]; !ok {
		// The original message said "Connected to an unknown address:port",
		// a copy-paste from ConnectionComplete; this is the failure path.
		fmt.Println("Failed connection to an unknown address:port ", addressport)
		return
	}
	na := a.knownList[addressport]
	// HMM: logic here could be simpler if we make it one list instead.
	if stats, ok := a.badAddrs[na]; ok {
		stats.lastTried = time.Now()
		stats.failures++
		stats.tries++
		// Drop hopeless addresses entirely: >80% failure rate over more
		// than five attempts.
		if float32(stats.failures)/float32(stats.tries) > 0.8 && stats.tries > 5 {
			delete(a.badAddrs, na)
			return
		}
		a.badAddrs[na] = stats
		return
	}
	if stats, ok := a.goodAddrs[na]; ok {
		fmt.Println("We have a good addr", na.IPPort())
		stats.lastTried = time.Now()
		stats.failures++
		stats.tries++
		// Demote good -> bad: >50% failure rate over more than ten attempts.
		if float32(stats.failures)/float32(stats.tries) > 0.5 && stats.tries > 10 {
			delete(a.goodAddrs, na)
			a.badAddrs[na] = stats
			return
		}
		a.goodAddrs[na] = stats
		return
	}
	// First failure for a fresh address: move it straight to the bad list.
	if _, ok := a.newAddrs[na]; ok {
		delete(a.newAddrs, na)
		a.badAddrs[na] = addrStats{}
	}
}
// OnAddr is the handler wired into the config: invoked when an "addr"
// message is received from a peer, it feeds the advertised addresses
// into the manager.
func (a *Addrmgr) OnAddr(p *peer.Peer, msg *payload.AddrMessage) {
	a.AddAddrs(msg.AddrList)
}
// OnGetAddr is called when a peer requests our address list. We reply with
// the best addresses we have, taken from the good bucket.
func (a *Addrmgr) OnGetAddr(p *peer.Peer, msg *payload.GetAddrMessage) {
	// Collect the good addresses BEFORE any locking here: Good() takes the
	// read lock itself, and the original held RLock across this call — a
	// recursive RLock can deadlock once a writer is queued in between.
	goodAddrs := a.Good()
	addrMsg, err := payload.NewAddrMessage()
	if err != nil {
		return
	}
	for i := range goodAddrs {
		// Address the slice element, not a loop variable: the original did
		// `for _, add := range … AddNetAddr(&add)`, which (pre-Go 1.22)
		// reuses one variable, so every entry pointed at the last address.
		addrMsg.AddNetAddr(&goodAddrs[i])
	}
	p.Write(addrMsg)
}
// NewAddr returns an "ip:port" string for the external caller — in our
// case the connection manager — to connect to.
// For now it simply returns a random value from the unconnected list.
// TODO: When an address is tried, the address manager is notified.
// When asked for a new address, this should be taken into account
// when choosing a new one, also the number of retries.
func (a *Addrmgr) NewAddr() (string, error) {
	unconnected := a.Unconnected()
	if len(unconnected) == 0 {
		return "", errors.New("No Addresses to give")
	}
	// Named i so it no longer shadows the math/rand package, as the
	// original's `rand := rand.Intn(...)` did.
	i := rand.Intn(len(unconnected))
	return unconnected[i].IPPort(), nil
}
// removeDuplicates returns elements with duplicate IP:port entries removed,
// preserving first-seen order.
// Based on https://www.dotnetperls.com/duplicates-go, tidied up: the
// original compared `== true` and used an empty if-branch.
func removeDuplicates(elements []*payload.Net_addr) []*payload.Net_addr {
	seen := make(map[string]bool, len(elements))
	result := make([]*payload.Net_addr, 0, len(elements))
	for _, element := range elements {
		if seen[element.IPPort()] {
			continue // duplicate
		}
		seen[element.IPPort()] = true
		result = append(result, element)
	}
	return result
}

View file

@ -1,167 +0,0 @@
package addrmgr_test
import (
"crypto/rand"
"testing"
"github.com/CityOfZion/neo-go/pkg/wire/payload"
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
"github.com/CityOfZion/neo-go/pkg/addrmgr"
"github.com/stretchr/testify/assert"
)
// TestNewAddrs checks that New returns a non-nil manager.
func TestNewAddrs(t *testing.T) {
	addrmgr := addrmgr.New()
	assert.NotEqual(t, nil, addrmgr)
}
// TestAddAddrs feeds a batch containing duplicates (two addresses share the
// same IP) and checks only the two unique ones land in the unconnected
// bucket, with good and bad still empty.
func TestAddAddrs(t *testing.T) {
	addrmgr := addrmgr.New()
	ip := [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	addr, _ := payload.NewNetAddr(0, ip, 1033, protocol.NodePeerService)
	ip = [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // same
	addr2, _ := payload.NewNetAddr(0, ip, 1033, protocol.NodePeerService)
	ip = [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr3, _ := payload.NewNetAddr(0, ip, 1033, protocol.NodePeerService)
	addrs := []*payload.Net_addr{addr, addr2, addr, addr3}
	addrmgr.AddAddrs(addrs)
	assert.Equal(t, 2, len(addrmgr.Unconnected()))
	assert.Equal(t, 0, len(addrmgr.Good()))
	assert.Equal(t, 0, len(addrmgr.Bad()))
}
// TestFetchMoreAddress adds more than maxAllowedAddrs random addresses and
// checks the manager stops asking for more.
func TestFetchMoreAddress(t *testing.T) {
	addrmgr := addrmgr.New()
	addrs := []*payload.Net_addr{}
	ip := make([]byte, 16)
	for i := 0; i <= 2000; i++ { // Add more than maxAllowedAddrs
		rand.Read(ip)
		var nip [16]byte
		copy(nip[:], ip[:16])
		addr, _ := payload.NewNetAddr(0, nip, 1033, protocol.NodePeerService)
		addrs = append(addrs, addr)
	}
	addrmgr.AddAddrs(addrs)
	assert.Equal(t, false, addrmgr.FetchMoreAddresses())
}
// TestConnComplete promotes two of three addresses via ConnectionComplete
// and checks they move from unconnected to good; a repeated completion for
// the same address must not double-count.
func TestConnComplete(t *testing.T) {
	addrmgr := addrmgr.New()
	ip := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr, _ := payload.NewNetAddr(0, ip, 1033, protocol.NodePeerService)
	ip2 := [16]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr2, _ := payload.NewNetAddr(0, ip2, 1033, protocol.NodePeerService)
	ip3 := [16]byte{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr3, _ := payload.NewNetAddr(0, ip3, 1033, protocol.NodePeerService)
	addrs := []*payload.Net_addr{addr, addr2, addr3}
	addrmgr.AddAddrs(addrs)
	assert.Equal(t, len(addrs), len(addrmgr.Unconnected()))
	// a successful connection
	addrmgr.ConnectionComplete(addr.IPPort(), true)
	addrmgr.ConnectionComplete(addr.IPPort(), true) // should have no change
	assert.Equal(t, len(addrs)-1, len(addrmgr.Unconnected()))
	assert.Equal(t, 1, len(addrmgr.Good()))
	// another successful connection
	addrmgr.ConnectionComplete(addr2.IPPort(), true)
	assert.Equal(t, len(addrs)-2, len(addrmgr.Unconnected()))
	assert.Equal(t, 2, len(addrmgr.Good()))
}
// TestAttempted checks that a single failure on a fresh address moves it
// straight from the new bucket to the bad bucket.
func TestAttempted(t *testing.T) {
	addrmgr := addrmgr.New()
	ip := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr, _ := payload.NewNetAddr(0, ip, 1033, protocol.NodePeerService)
	addrs := []*payload.Net_addr{addr}
	addrmgr.AddAddrs(addrs)
	addrmgr.Failed(addr.IPPort())
	assert.Equal(t, 1, len(addrmgr.Bad())) // newAddrs entry was attempted and failed: moved to Bad
}
// TestAttemptedMoveFromGoodToBad drives a good address over the demotion
// threshold (failure ratio > 0.5 with tries > 10) and checks it ends up in
// the bad bucket.
func TestAttemptedMoveFromGoodToBad(t *testing.T) {
	addrmgr := addrmgr.New()
	ip := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr, _ := payload.NewNetAddr(0, ip, 1043, protocol.NodePeerService)
	addrs := []*payload.Net_addr{addr}
	addrmgr.AddAddrs(addrs)
	addrmgr.ConnectionComplete(addr.IPPort(), true)
	addrmgr.ConnectionComplete(addr.IPPort(), true)
	addrmgr.ConnectionComplete(addr.IPPort(), true)
	assert.Equal(t, 1, len(addrmgr.Good()))
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	addrmgr.Failed(addr.IPPort())
	// over threshold, so the address is now classed as bad
	assert.Equal(t, 0, len(addrmgr.Good()))
	assert.Equal(t, 1, len(addrmgr.Bad()))
}
// TestGetAddress checks NewAddr returns one of the known unconnected
// addresses without error.
func TestGetAddress(t *testing.T) {
	addrmgr := addrmgr.New()
	ip := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr, _ := payload.NewNetAddr(0, ip, 10333, protocol.NodePeerService)
	ip2 := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr2, _ := payload.NewNetAddr(0, ip2, 10334, protocol.NodePeerService)
	ip3 := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // different
	addr3, _ := payload.NewNetAddr(0, ip3, 10335, protocol.NodePeerService)
	addrs := []*payload.Net_addr{addr, addr2, addr3}
	addrmgr.AddAddrs(addrs)
	fetchAddr, err := addrmgr.NewAddr()
	assert.Equal(t, nil, err)
	ipports := []string{addr.IPPort(), addr2.IPPort(), addr3.IPPort()}
	assert.Contains(t, ipports, fetchAddr)
}

View file

View file

@ -1,23 +0,0 @@
# Package - Address Manager
This package can be used as a standalone to manage addresses on the NEO network. Although you can use it for other chains, the config parameters have been modified for the state of NEO as of now.
## Responsibility
To manage the data that the node knows about addresses: data on good addresses, retry rates, failures, and the last successful connection is managed by the address manager. Also, if a service wants to fetch good addresses, it requests them from the address manager.
## Features
- On GetAddr it will give a list of good addresses to connect to
- On Addr it will receive addresses and remove any duplicates.
- General Management of Addresses
- Periodically saves the peers and metadata about peer into a .json file for retrieval (Not implemented yet)
## Note
The Address manager will not deal with making connections to nodes. Please check the tests for the use cases for this package.

View file

@ -1,187 +0,0 @@
package blockchain
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"github.com/CityOfZion/neo-go/pkg/chainparams"
"github.com/CityOfZion/neo-go/pkg/database"
"github.com/CityOfZion/neo-go/pkg/wire/payload"
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
)
var (
	// ErrBlockValidation signals a block failed the stateless sanity checks.
	ErrBlockValidation = errors.New("Block failed sanity check")
	// ErrBlockVerification signals a block is inconsistent with the current chain state.
	ErrBlockVerification = errors.New("Block failed to be consistent with the current blockchain")
)
// Chain holds the state of the blockchain: its backing store and the
// network (magic) it belongs to. (Doc comment renamed to match the type,
// per Go convention.)
type Chain struct {
	db  *database.LDB
	net protocol.Magic
}
// New opens (or initialises) a chain on top of db for the given network.
// On a fresh database the MainNet genesis block is decoded and stored,
// together with its header and transactions; any failure deletes the init
// marker again and returns nil. TestNet initialisation is not implemented
// and also returns nil.
func New(db *database.LDB, net protocol.Magic) *Chain {
	marker := []byte("HasBeenInitialisedAlready")
	_, err := db.Get(marker)
	if err != nil {
		// Marker absent: this is a new db.
		fmt.Println("New Database initialisation")
		// NOTE(review): the Put error is ignored here — confirm acceptable.
		db.Put(marker, []byte{})
		// We add the genesis block into the db
		// along with the indexes for it.
		if net == protocol.MainNet {
			genesisBlock, err := hex.DecodeString(chainparams.GenesisBlock)
			if err != nil {
				fmt.Println("Could not add genesis header into db")
				db.Delete(marker)
				return nil
			}
			r := bytes.NewReader(genesisBlock)
			b := payload.Block{}
			err = b.Decode(r)
			if err != nil {
				fmt.Println("could not Decode genesis block")
				db.Delete(marker)
				return nil
			}
			err = db.AddHeader(&b.BlockBase)
			if err != nil {
				fmt.Println("Could not add genesis header")
				db.Delete(marker)
				return nil
			}
			err = db.AddTransactions(b.Hash, b.Txs)
			if err != nil {
				fmt.Println("Could not add Genesis Transactions")
				db.Delete(marker)
				return nil
			}
		}
		if net == protocol.TestNet {
			fmt.Println("TODO: Setup the genesisBlock for TestNet")
			return nil
		}
	}
	return &Chain{
		db,
		net,
	}
}
// AddBlock runs stateless validation and contextual verification on msg,
// then persists the encoded block in the database keyed by its hash.
// Returns ErrBlockValidation / ErrBlockVerification when the respective
// check fails.
func (c *Chain) AddBlock(msg *payload.BlockMessage) error {
	if !validateBlock(msg) {
		return ErrBlockValidation
	}
	if !c.verifyBlock(msg) {
		return ErrBlockVerification
	}
	fmt.Println("Block Hash is ", msg.Hash.String())
	buf := new(bytes.Buffer)
	err := msg.Encode(buf)
	if err != nil {
		return err
	}
	return c.db.Put(msg.Hash.Bytes(), buf.Bytes())
}
// validateBlock will check the transactions, that the merkle root is good,
// the signature is good — everything that does not require state.
// This may be moved to the syncmanager. This function should not run in a
// separate goroutine: we intentionally block here because if the block is
// invalid we will disconnect from the peer.
// We could have this return an error instead, where the error could even
// say where the validation failed, for the logs.
// Currently a stub that accepts every block.
func validateBlock(msg *payload.BlockMessage) bool {
	return true
}
// verifyBlock checks msg against the current chain state (contextual
// checks). Currently a stub that accepts every block.
func (c *Chain) verifyBlock(msg *payload.BlockMessage) bool {
	return true
}
// This will add a header into the db,
// indexing it also, this method will not
// run any checks, like if it links with a header
// previously in the db
// func (c *Chain) addHeaderNoCheck(header *payload.BlockBase) error {
// }
// ValidateHeaders checks that a batch of headers links correctly onto the
// chain's current latest header: hashes chain, indexes increment by one,
// and timestamps never decrease. These are the only non-contextual checks
// possible without full blockchain state. Not safe for concurrent access.
func (c *Chain) ValidateHeaders(msg *payload.HeadersMessage) error {
	table := database.NewTable(c.db, database.HEADER)
	latestHash, err := table.Get(database.LATESTHEADER)
	if err != nil {
		return err
	}
	val, err := table.Get(latestHash)
	if err != nil {
		// The original ignored this error and went on to decode a nil
		// value; surface it instead.
		return err
	}
	lastHeader := &payload.BlockBase{}
	if err := lastHeader.Decode(bytes.NewReader(val)); err != nil {
		return err
	}
	// TODO?: Maybe we should sort these headers using the Index.
	// We should not get them in mixed order, but doing so would not be
	// expensive if they are already in order.
	for _, currentHeader := range msg.Headers {
		if lastHeader == nil {
			// Should not happen, as the genesis header is added on a new
			// database; checked nonetheless.
			return errors.New("Previous Header is nil")
		}
		// Current header must link to the previous one.
		if currentHeader.PrevHash != lastHeader.Hash {
			return errors.New("Last Header hash != current header Prev hash")
		}
		// Index must increment by exactly one.
		if currentHeader.Index != lastHeader.Index+1 {
			return errors.New("Last Header Index != current header Index")
		}
		// Timestamps must never go backwards.
		if lastHeader.Timestamp > currentHeader.Timestamp {
			return errors.New("Timestamp of Previous Header is more than Timestamp of current Header")
		}
		// Deliberately NOT rejecting headers more than 15s in the future:
		// some historical blocks were delayed by forks.
		lastHeader = currentHeader
	}
	return nil
}
// AddHeaders persists every header in the message, stopping at the first
// database error.
func (c *Chain) AddHeaders(msg *payload.HeadersMessage) error {
	for _, hdr := range msg.Headers {
		err := c.db.AddHeader(hdr)
		if err != nil {
			return err
		}
	}
	return nil
}

View file

@ -1,7 +0,0 @@
package chainparams
// GAS is the script hash of the native NEO asset: GAS.
var GAS = "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7"

// NEO is the script hash of the native NEO asset: NEO.
var NEO = "c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b"

View file

@ -1,51 +0,0 @@
package chainparams
// For now we will just use this file to store the peers, to test peer data.
// Once complete, it will store the genesis params for testnet and mainnet.

// mainnetSeedList is the raw seed list from protocol.mainnet; YEP/NOP marks
// whether the node replied when last checked.
var mainnetSeedList = []string{
	"seed1.neo.org:10333",  // NOP
	"seed2.neo.org:10333",  // YEP
	"seed3.neo.org:10333",  // YEP
	"seed4.neo.org:10333",  // NOP
	"seed5.neo.org:10333",  // YEP
	"13.59.52.94:10333",    // NOP
	"18.220.214.143:10333", // NOP
	"13.58.198.112:10333",  // NOP
	"13.59.14.206:10333",   // NOP
	"18.216.9.7:10333",     // NOP
}

// MainnetSeedList is a string slice containing the initial seeds from
// protocol.mainnet that are replying.
// NOTE(review): the "http://" prefixes look wrong for entries dialed as raw
// TCP "host:port" addresses — confirm how consumers parse these.
var MainnetSeedList = []string{
	"seed2.neo.org:10333",
	"seed3.neo.org:10333",
	"seed5.neo.org:10333",
	"http://seed1.ngd.network:10333",
	"http://seed2.ngd.network:10333",
	"http://seed3.ngd.network:10333",
	"http://seed4.ngd.network:10333",
	"http://seed5.ngd.network:10333",
	"http://seed6.ngd.network:10333",
	"http://seed7.ngd.network:10333",
	"http://seed8.ngd.network:10333",
	"http://seed9.ngd.network:10333",
	"http://seed10.ngd.network:10333",
}

// testNetSeedList holds the TestNet seed addresses.
var testNetSeedList = []string{
	"18.218.97.227:20333",
	"18.219.30.120:20333",
	"18.219.13.91:20333",
	"13.59.116.121:20333",
	"18.218.255.178:20333",
}

// GenesisHash is the block hash of the MainNet genesis block.
var GenesisHash = "d42561e3d30e15be6400b6df2f328e02d2bf6354c41dce433bc57687c82144bf"

// GenesisBlock is the hex-encoded serialized MainNet genesis block.
var GenesisBlock = "000000000000000000000000000000000000000000000000000000000000000000000000f41bc036e39b0d6b0579c851c6fde83af802fa4e57bec0bc3365eae3abf43f8065fc8857000000001dac2b7c0000000059e75d652b5d3827bf04c165bbe9ef95cca4bf55010001510400001dac2b7c00000000400000455b7b226c616e67223a227a682d434e222c226e616d65223a22e5b08fe89a81e882a1227d2c7b226c616e67223a22656e222c226e616d65223a22416e745368617265227d5d0000c16ff28623000000da1745e9b549bd0bfa1a569971c77eba30cd5a4b00000000400001445b7b226c616e67223a227a682d434e222c226e616d65223a22e5b08fe89a81e5b881227d2c7b226c616e67223a22656e222c226e616d65223a22416e74436f696e227d5d0000c16ff286230008009f7fd096d37ed2c0e3f7f0cfc924beef4ffceb680000000001000000019b7cffdaa674beae0f930ebe6085af9093e5fe56b34a5c220ccdcf6efc336fc50000c16ff28623005fa99d93303775fe50ca119c327759313eccfa1c01000151"

View file

@ -1,25 +0,0 @@
package connmgr
import (
"net"
)
// Config contains all callbacks and settings supplied by the caller to set
// up the connection manager.
type Config struct {
	// GetAddress returns a single "ip:port" address for the connection
	// manager to connect to.
	GetAddress func() (string, error)

	// OnConnection is called by the connection manager when we successfully
	// connect to a peer. The caller should ideally inform the address
	// manager here that this address has been connected to.
	OnConnection func(conn net.Conn, addr string)

	// OnAccept receives an established inbound connection.
	OnAccept func(net.Conn)

	// Port is the local listening port, in the format "10333".
	Port string

	// DialTimeout is the time (in seconds) before a pending dialed
	// connection is abandoned.
	DialTimeout int
}

View file

@ -1,272 +0,0 @@
package connmgr
import (
"errors"
"fmt"
"net"
"net/http"
"time"
"github.com/CityOfZion/neo-go/pkg/wire/util/ip"
)
var (
	// maxOutboundConn is the maximum number of active peers
	// that the connection manager will try to have.
	maxOutboundConn = 10

	// maxRetries is the maximum amount of successive retries that
	// we can have before we stop dialing that peer.
	maxRetries = uint8(5)
)
// Connmgr manages pending, active and failed connections.
type Connmgr struct {
	config        Config
	PendingList   map[string]*Request // dial in flight, keyed by "ip:port"
	ConnectedList map[string]*Request // established, keyed by "ip:port"
	actionch      chan func()         // serialises all list mutations onto loop()
}
// New creates a new connection manager from cfg and immediately starts a
// goroutine that listens on the local IP at cfg.Port, handing every
// accepted inbound connection to cfg.OnAccept.
func New(cfg Config) *Connmgr {
	cnnmgr := &Connmgr{
		cfg,
		make(map[string]*Request),
		make(map[string]*Request),
		make(chan func(), 300),
	}
	go func() {
		ip := iputils.GetLocalIP()
		addrPort := ip.String() + ":" + cfg.Port
		listener, err := net.Listen("tcp", addrPort)
		if err != nil {
			// NOTE(review): a failed Listen is only logged; the Accept
			// loop below will then operate on a nil listener — confirm
			// this should not return/panic-guard instead.
			fmt.Println("Error connecting to outbound ", err)
		}
		defer func() {
			listener.Close()
		}()
		for {
			conn, err := listener.Accept()
			if err != nil {
				continue
			}
			// TODO(kev): in the OnAccept the connection address will be added to AddrMgr
			go cfg.OnAccept(conn)
		}
	}()
	return cnnmgr
}
// NewRequest fetches a fresh address from the configured GetAddress
// callback, wraps it in a Request and attempts to connect to it.
func (c *Connmgr) NewRequest() {
	// Fetch an address to dial.
	addr, err := c.config.GetAddress()
	if err != nil {
		fmt.Println("Error getting address", err)
		// Without an address there is nothing to dial; the original fell
		// through and attempted to connect to the empty string.
		return
	}
	r := &Request{Addr: addr}
	fmt.Println("Connecting")
	c.Connect(r)
}
// Connect dials the request's address. On failure the retry policy in
// failed() takes over; on success the request is registered as connected.
func (c *Connmgr) Connect(r *Request) error {
	r.Retries++
	conn, err := c.Dial(r.Addr)
	if err != nil {
		c.failed(r)
		return err
	}
	r.Conn = conn
	// NOTE(review): this marks a dialed (outbound) connection as Inbound —
	// looks inverted; confirm against Request's field semantics.
	r.Inbound = true
	// r.Permanent is set by the caller; the default is false.
	// The permanent connections will be the ones that are hardcoded,
	// e.g. seed3.ngd.network.
	return c.connected(r)
}
// Disconnect removes the peer at addr from the connected (or pending) list
// and hands it to disconnected() for cleanup and retry scheduling.
func (cm *Connmgr) Disconnect(addr string) {
	// Look in connected first, then pending.
	r, ok := cm.ConnectedList[addr]
	if !ok {
		r, ok = cm.PendingList[addr]
	}
	if !ok {
		// Unknown address. The original passed the resulting nil Request
		// on to disconnected(), which dereferences it.
		return
	}
	cm.disconnected(r)
}
// Dial dials the given "address:port" over TCP, honouring the configured
// DialTimeout (in seconds). A zero or negative configured timeout falls
// back to one second, matching the previously hard-coded value, so
// existing callers see no change. If dialing fails while the machine has
// no internet connectivity at all, a fatal-style error is returned instead.
func (c *Connmgr) Dial(addr string) (net.Conn, error) {
	dialTimeout := 1 * time.Second
	if c.config.DialTimeout > 0 {
		// The original ignored Config.DialTimeout entirely.
		dialTimeout = time.Duration(c.config.DialTimeout) * time.Second
	}
	conn, err := net.DialTimeout("tcp", addr, dialTimeout)
	if err != nil {
		if !isConnected() {
			return nil, errors.New("Fatal Error: You do not seem to be connected to the internet")
		}
		return nil, err
	}
	return conn, nil
}
// failed schedules the retry policy for a request whose connection attempt
// (or established connection) has failed. The decision runs on the
// manager's event loop goroutine.
func (cm *Connmgr) failed(r *Request) {
	cm.actionch <- func() {
		// Priority check: permanent or inbound peers are valuable in NEO,
		// so always retry them, after a backoff of 10s per recorded retry.
		if r.Inbound || r.Permanent {
			multiplier := time.Duration(r.Retries * 10)
			time.AfterFunc(multiplier*time.Second,
				func() {
					cm.Connect(r)
				},
			)
			// Otherwise: if this request has exceeded maxRetries, ask for a
			// new address (when a source is configured); else just retry it.
		} else if r.Retries > maxRetries {
			if cm.config.GetAddress != nil {
				go cm.NewRequest()
			}
			fmt.Println("This peer has been tried the maximum amount of times and a source of new address has not been specified.")
		} else {
			go cm.Connect(r)
		}
	}
}
// disconnected is called when a peer disconnects. The request's connection
// is closed, the request is removed from both lists, and a retry is
// scheduled via failed(). Synchronous: blocks until the event loop has
// processed the action.
func (c *Connmgr) disconnected(r *Request) error {
	errChan := make(chan error)
	c.actionch <- func() {
		if r == nil {
			// The original recorded this error but then dereferenced r
			// anyway, panicking; bail out instead.
			errChan <- errors.New("Request object is nil")
			return
		}
		r2 := *r // copy, so r.Addr is not lost after the deletes below
		// Close the underlying connection if it is still open (guarded:
		// the original called Close on a possibly-nil Conn).
		if r.Conn != nil {
			r.Conn.Close()
		}
		r.Conn = nil
		// Remove from both lists; it may legitimately be in either.
		delete(c.PendingList, r.Addr)
		delete(c.ConnectedList, r.Addr)
		c.failed(&r2)
		errChan <- nil
	}
	return <-errChan
}
// connected is called when the connection manager makes a successful
// connection: retries are reset, the request moves from pending to
// connected, and the OnConnection callback fires. Synchronous: blocks
// until the event loop has processed the action.
func (c *Connmgr) connected(r *Request) error {
	errorChan := make(chan error)
	c.actionch <- func() {
		// Should not happen since we just connected; kept as a guard. The
		// original recorded the error but then dereferenced r anyway.
		if r == nil {
			errorChan <- errors.New("Request object as nil inside of the connected function")
			return
		}
		// Reset retries now that the connection succeeded.
		r.Retries = 0
		c.ConnectedList[r.Addr] = r
		// Remove from pending if it was there.
		delete(c.PendingList, r.Addr)
		if c.config.OnConnection != nil {
			c.config.OnConnection(r.Conn, r.Addr)
		}
		// The original printed "Error connected <nil>" on every success;
		// nothing is logged on the happy path now.
		errorChan <- nil
	}
	return <-errorChan
}
// Pending is synchronous; we do not want to continue with logic
// until we are certain the request has been added to the pendingList.
func (c *Connmgr) pending(r *Request) error {
	errChan := make(chan error, 1)
	c.actionch <- func() {
		// Guard against a nil request before reading r.Addr; the
		// original set an error but indexed the map anyway, panicking.
		if r == nil {
			errChan <- errors.New("request object is nil")
			return
		}
		c.PendingList[r.Addr] = r
		errChan <- nil
	}
	return <-errChan
}
// Run starts the connection manager's event loop in a background
// goroutine; all list mutations are serialised through that loop.
func (c *Connmgr) Run() {
	go c.loop()
}
// loop serialises all state mutations: every action sent on actionch
// runs on this single goroutine, so the lists need no locks.
func (c *Connmgr) loop() {
	// A single-case select in a for loop is just a channel receive;
	// range expresses the same thing and exits cleanly if the
	// channel is ever closed.
	for f := range c.actionch {
		f()
	}
}
// isConnected reports whether the host has internet connectivity by
// probing Google's generate_204 endpoint.
// https://stackoverflow.com/questions/50056144/check-for-internet-connection-from-application
func isConnected() bool {
	resp, err := http.Get("http://clients3.google.com/generate_204")
	if err != nil {
		return false
	}
	// Close the body so the underlying connection is not leaked and
	// can be reused by the transport.
	resp.Body.Close()
	return true
}

View file

@ -1,106 +0,0 @@
package connmgr_test
import (
"testing"
"github.com/CityOfZion/neo-go/pkg/connmgr"
"github.com/stretchr/testify/assert"
)
// TestDial checks that Dial returns a usable connection for a
// reachable address.
func TestDial(t *testing.T) {
	cm := connmgr.New(connmgr.Config{
		GetAddress:   nil,
		OnConnection: nil,
		OnAccept:     nil,
		Port:         "",
		DialTimeout:  0,
	})
	cm.Run()

	// google unlikely to go offline, a better approach to test Dialing is welcome.
	addr := "google.com:80"
	conn, err := cm.Dial(addr)
	assert.Equal(t, nil, err)
	assert.NotEqual(t, nil, conn)
}
// TestConnect checks that connecting a request for a reachable
// address lands it in the connected list.
func TestConnect(t *testing.T) {
	cm := connmgr.New(connmgr.Config{
		GetAddress:   nil,
		OnConnection: nil,
		OnAccept:     nil,
		Port:         "",
		DialTimeout:  0,
	})
	cm.Run()

	req := connmgr.Request{Addr: "google.com:80"}
	cm.Connect(&req)

	assert.Equal(t, 1, len(cm.ConnectedList))
}
// TestNewRequest checks that NewRequest fetches an address from the
// configured source and connects to it.
func TestNewRequest(t *testing.T) {
	address := "google.com:80"
	cm := connmgr.New(connmgr.Config{
		GetAddress: func() (string, error) {
			return address, nil
		},
		OnConnection: nil,
		OnAccept:     nil,
		Port:         "",
		DialTimeout:  0,
	})
	cm.Run()
	cm.NewRequest()

	_, ok := cm.ConnectedList[address]
	if !ok {
		assert.Fail(t, "Could not find the address in the connected lists")
		return
	}
	assert.Equal(t, true, ok)
	assert.Equal(t, 1, len(cm.ConnectedList))
}
// TestDisconnect checks that a connected address is removed from the
// connected list after Disconnect.
func TestDisconnect(t *testing.T) {
	address := "google.com:80"
	cm := connmgr.New(connmgr.Config{
		GetAddress: func() (string, error) {
			return address, nil
		},
		OnConnection: nil,
		OnAccept:     nil,
		Port:         "",
		DialTimeout:  0,
	})
	cm.Run()

	cm.NewRequest()
	cm.Disconnect(address)

	assert.Equal(t, 0, len(cm.ConnectedList))
}

View file

@ -1,26 +0,0 @@
# Package - Connection Manager
## Responsibility
- Manages the active, failed and pending connections for the node.
## Features
- Takes an address, dials it and packages it into a request to manage.
- Retry failed connections.
- Uses one function as a source for its addresses. It does not manage addresses.
## Usage
The following methods are exposed from the Connection manager:
- NewRequest() : This will fetch a new address and connect to it.
- Connect(r *Request) : This takes a Request object and connects to it. It follows the same logic as NewRequest(); however, instead of getting the address from the data source given upon initialisation, you directly feed the address you want to connect to.
- Disconnect(addrport string) : Given an address:port, this will disconnect it, close the connection and remove it from the connected and pending list, if it was there.
- Dial(addrport string) (net.Conn, error) : Given an address:port, this will connect to it and return a pointer to a connection plus a nil error if successful, or nil with an error.

View file

@ -1,13 +0,0 @@
package connmgr
import (
"net"
)
// Request represents a single connection tracked by the
// connection manager.
type Request struct {
	// Conn is the underlying network connection; nil until dialled.
	Conn net.Conn
	// Addr is the "host:port" address, used as the key in the manager's lists.
	Addr string
	// Permanent marks a request that the manager keeps retrying on failure.
	Permanent bool
	// Inbound is true when the remote peer initiated the connection.
	Inbound bool
	Retries uint8 // should not be trying more than 255 tries
}

View file

@ -1,31 +0,0 @@
package mempool
import "time"
// Config holds the tunable admission limits for the mempool.
type Config struct {
	// This is the maximum amount
	// of transactions that we will allow in the mempool
	MaxNumOfTX uint64

	// FreeTX defines the maximum amount of free txs that can be in the mempool at one time
	// Default is 20
	FreeTX uint32

	// MinTXFee is a number in Fixed8 format. If set at 1GAS, minTXFee would equal 1e8
	// The minTXFee is used to set the floor, it defaults to zero meaning we will allow all transactions
	// with a fee of 0 or more
	MinTXFee uint64

	// MaxTXSize is the maximum number of bytes a tx can have to be entered into the pool
	MaxTXSize uint64

	// TXTTL is the duration to which we should keep an item in the mempool before removing it
	// HMM: Should this be amount of blocks instead? For when blocks take a long time
	// to process?
	TXTTL time.Duration

	// SigLimit is the maximum amount of signatures
	// that we will allow a tx to have, default will be 20
	SigLimit uint8
}

View file

@ -1,138 +0,0 @@
package mempool
import (
"errors"
"fmt"
"sync"
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
"github.com/CityOfZion/neo-go/pkg/wire/util"
)
// Sentinel errors returned by the mempool. Callers compare these with
// equality (or errors.Is), so only the messages changed here: Go
// convention (ST1005) wants lowercase, unpunctuated error strings, and
// "has exceed" was a typo.
var (
	ErrMemPoolFull        = errors.New("mempool is currently full")
	ErrMempoolEmpty       = errors.New("there are no TXs in the mempool")
	ErrTXTooBig           = errors.New("TX has exceeded the maximum threshold")
	ErrTXTooManyWitnesses = errors.New("too many witness scripts")
	ErrFeeTooLow          = errors.New("fee for transaction too low")
	ErrDuplicateTX        = errors.New("TX already in pool")
)
// Mempool holds the set of known-but-unconfirmed transactions,
// keyed by transaction hash and guarded by mtx.
type Mempool struct {
	mtx sync.RWMutex
	pool map[util.Uint256]*TX
	cfg Config
}
// New creates an empty Mempool configured with cfg.
func New(cfg Config) *Mempool {
	return &Mempool{
		mtx:  sync.RWMutex{},
		pool: make(map[util.Uint256]*TX, 200),
		cfg:  cfg,
	}
}
// AddTransaction validates trans against the pool's limits and, if it
// passes, stores it. Returns ErrDuplicateTX, ErrMemPoolFull,
// ErrTXTooBig, ErrTXTooManyWitnesses or ErrFeeTooLow on rejection.
func (m *Mempool) AddTransaction(trans transaction.Transactioner) error {
	hash, err := trans.ID()
	if err != nil {
		return err
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Check for duplicates under the write lock. The original called
	// Exists() (RLock) before taking the write lock, leaving a window
	// in which a concurrent caller could add the same hash.
	// (RWMutex is not reentrant, so the check is inlined here.)
	if _, ok := m.pool[hash]; ok {
		return ErrDuplicateTX
	}

	if m.cfg.MaxNumOfTX == uint64(len(m.pool)) {
		return ErrMemPoolFull
	}

	// TODO: Check for double spend from blockchain itself

	// Create the tx descriptor carrying fee/size/witness metadata.
	tx := Descriptor(trans)

	// Check TX size.
	if tx.Size > m.cfg.MaxTXSize {
		return ErrTXTooBig
	}

	// Check witness count.
	if tx.NumWitness > m.cfg.SigLimit {
		return ErrTXTooManyWitnesses
	}

	// TODO: check witness data is good -- Add method to take the Witness and tx return true or false.(blockchain)

	// Check the fee is over the configured minimum.
	if tx.Fee < m.cfg.MinTXFee {
		return ErrFeeTooLow
	}

	// Add into pool.
	m.pool[hash] = tx
	return nil
}
// RemoveTransaction removes a transaction from the node's mempool.
// Removing a hash that is not present is a no-op (use Exists to check
// membership first); an empty pool yields ErrMempoolEmpty.
func (m *Mempool) RemoveTransaction(hash util.Uint256) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if len(m.pool) == 0 {
		return ErrMempoolEmpty
	}
	// delete is a no-op when the key is absent, so no existence check.
	delete(m.pool, hash)
	return nil
}
// Size returns the number of transactions currently in the mempool.
func (m *Mempool) Size() uint64 {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	// The original stored this in a local named "len", shadowing the
	// builtin; returning directly avoids that.
	return uint64(len(m.pool))
}
// ReturnAllTransactions returns every transaction in the mempool;
// mostly used by the RPC server. Returns ErrMempoolEmpty when the
// pool holds no transactions.
func (m *Mempool) ReturnAllTransactions() ([]transaction.Transactioner, error) {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	if len(m.pool) == 0 {
		return nil, ErrMempoolEmpty
	}

	// Pre-size since the final length is known; the original also
	// left debug fmt.Println calls in this loop, removed here.
	transactions := make([]transaction.Transactioner, 0, len(m.pool))
	for _, t := range m.pool {
		if t.ParentTX == nil {
			// Should never happen: Descriptor always sets ParentTX.
			// Skip instead of dereferencing nil.
			continue
		}
		transactions = append(transactions, *t.ParentTX)
	}
	return transactions, nil
}
// Exists reports whether a transaction with the given hash is
// currently in the mempool.
func (m *Mempool) Exists(hash util.Uint256) bool {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	_, ok := m.pool[hash]
	return ok
}

View file

@ -1,211 +0,0 @@
package mempool_test
import (
"testing"
"time"
"github.com/CityOfZion/neo-go/pkg/mempool"
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
"github.com/stretchr/testify/assert"
)
// TestMempoolExists checks membership reporting before and after a
// transaction is added.
func TestMempoolExists(t *testing.T) {
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      10 * time.Minute,
		SigLimit:   20,
	})

	tx := transaction.NewContract(0)
	assert.Equal(t, false, pool.Exists(tx.Hash))

	assert.Equal(t, nil, pool.AddTransaction(tx))
	assert.Equal(t, true, pool.Exists(tx.Hash))
}
// TestMempoolFullPool checks that the pool rejects a transaction with
// ErrMemPoolFull once MaxNumOfTX entries are stored.
func TestMempoolFullPool(t *testing.T) {
	const maxTx = uint64(100)
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: maxTx,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      10 * time.Minute,
		SigLimit:   20,
	})

	// Fill the pool to capacity; a unique remark attribute gives each
	// tx a distinct hash.
	for i := uint64(1); i <= maxTx; i++ {
		tx := transaction.NewContract(0)
		tx.AddAttribute(&transaction.Attribute{
			Usage: transaction.Remark,
			Data:  []byte{byte(i)},
		})
		assert.Equal(t, nil, pool.AddTransaction(tx))
	}

	// One more must be refused.
	err := pool.AddTransaction(transaction.NewContract(0))
	assert.NotEqual(t, nil, err)
	assert.Equal(t, mempool.ErrMemPoolFull, err)
}
// TestMempoolLargeTX checks that a transaction larger than MaxTXSize
// is rejected with ErrTXTooBig.
func TestMempoolLargeTX(t *testing.T) {
	const maxTxSize = uint64(100)
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  maxTxSize,
		TXTTL:      10 * time.Minute,
		SigLimit:   20,
	})

	tx := transaction.NewContract(0)
	// 100 attributes push the serialised size past 100 bytes.
	for i := uint64(1); i <= 100; i++ {
		tx.AddAttribute(&transaction.Attribute{
			Usage: transaction.Remark,
			Data:  []byte{byte(i)},
		})
	}

	err := pool.AddTransaction(tx)
	assert.NotEqual(t, nil, err)
	assert.Equal(t, mempool.ErrTXTooBig, err)
}
// TestMempoolTooManyWitness checks that a transaction with more
// witnesses than SigLimit is rejected with ErrTXTooManyWitnesses.
func TestMempoolTooManyWitness(t *testing.T) {
	const maxWitness = uint8(3)
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      10 * time.Minute,
		SigLimit:   maxWitness,
	})

	tx := transaction.NewContract(0)
	// Add exactly SigLimit witnesses...
	for i := uint8(1); i <= maxWitness; i++ {
		tx.AddWitness(&transaction.Witness{
			InvocationScript:   []byte{byte(i)},
			VerificationScript: []byte{byte(i)},
		})
	}
	// ...then one more to push the count over the limit.
	tx.AddWitness(&transaction.Witness{
		InvocationScript:   []byte{},
		VerificationScript: []byte{},
	})

	err := pool.AddTransaction(tx)
	assert.NotEqual(t, nil, err)
	assert.Equal(t, mempool.ErrTXTooManyWitnesses, err)
}
// TestMempoolDuplicate checks that adding the same transaction twice
// is rejected with ErrDuplicateTX.
func TestMempoolDuplicate(t *testing.T) {
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      10 * time.Minute,
		SigLimit:   1,
	})

	tx := transaction.NewContract(0)
	assert.Equal(t, nil, pool.AddTransaction(tx))

	err := pool.AddTransaction(tx)
	assert.NotEqual(t, nil, err)
	assert.Equal(t, mempool.ErrDuplicateTX, err)
}
// TestMempoolReturnAll checks that ReturnAllTransactions hands back
// every transaction currently in the pool.
func TestMempoolReturnAll(t *testing.T) {
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      10 * time.Minute,
		SigLimit:   1,
	})

	const numTx = uint64(10)
	for i := uint64(1); i <= numTx; i++ {
		tx := transaction.NewContract(0)
		tx.AddAttribute(&transaction.Attribute{
			Usage: transaction.Remark,
			Data:  []byte{byte(i)},
		})
		assert.Equal(t, nil, pool.AddTransaction(tx))
	}

	all, err := pool.ReturnAllTransactions()
	assert.Equal(t, nil, err)
	assert.Equal(t, numTx, uint64(len(all)))
}
// TestMempoolRemove exercises RemoveTransaction against an empty
// pool, an absent hash, and a present hash.
func TestMempoolRemove(t *testing.T) {
	pool := mempool.New(mempool.Config{
		MaxNumOfTX: 100,
		FreeTX:     20,
		MinTXFee:   0,
		MaxTXSize:  10000,
		TXTTL:      3 * time.Minute,
		SigLimit:   1,
	})

	// Removing from an empty pool reports ErrMempoolEmpty.
	tx := transaction.NewContract(0)
	hash, _ := tx.ID()
	assert.Equal(t, mempool.ErrMempoolEmpty, pool.RemoveTransaction(hash))

	// Add TX1 into the mempool.
	assert.Equal(t, nil, pool.AddTransaction(tx))

	// TX2 differs from TX1 only by a remark attribute.
	other := transaction.NewContract(0)
	other.AddAttribute(&transaction.Attribute{
		Usage: transaction.Remark,
		Data:  []byte{},
	})
	otherHash, _ := other.ID()

	// Removing an absent hash is a no-op, not an error.
	assert.Equal(t, nil, pool.RemoveTransaction(otherHash))
	assert.Equal(t, uint64(1), pool.Size())

	// Removing the present hash shrinks the pool.
	assert.Equal(t, nil, pool.RemoveTransaction(hash))
	assert.Equal(t, uint64(0), pool.Size())
}

View file

@ -1,45 +0,0 @@
package mempool
import (
"time"
"github.com/CityOfZion/neo-go/pkg/wire/payload/transaction"
)
// TX is a wrapper struct around a normal tx
// which includes extra information about the TX
// <aligned>
type TX struct {
	// ParentTX is the wrapped transaction itself.
	ParentTX *transaction.Transactioner
	// Added records when the tx entered the pool (set by Descriptor).
	Added time.Time
	// Fee in Fixed8 format, computed from inputs minus outputs.
	Fee uint64
	// Size is the serialised size of the tx in bytes.
	Size uint64
	// NumWitness is the witness-script count, clamped to 255.
	NumWitness uint8
	// Free: NOTE(review) — Descriptor currently sets Free = (Fee != 0),
	// which looks inverted relative to the "free tx" wording in
	// Config.FreeTX; confirm intended polarity.
	Free bool
}
// Descriptor takes a transaction and puts it into a new TX struct
// along with metadata used by the pool's admission checks.
func Descriptor(trans transaction.Transactioner) *TX {
	var desc TX
	desc.ParentTX = &trans
	desc.Fee = getFee(trans.TXOs(), trans.UTXOs())
	// A transaction is "free" when it carries no fee. The original
	// set Free = (Fee != 0), inverting the meaning used by
	// Config.FreeTX ("maximum amount of free txs").
	desc.Free = desc.Fee == 0
	desc.Added = time.Now()
	desc.Size = uint64(len(trans.Bytes()))

	// NumWitness is a uint8, so clamp at 255; len() can never be
	// negative, so the original < 0 branch was dead code.
	numWit := len(trans.Witness())
	if numWit > 255 {
		numWit = 255
	}
	desc.NumWitness = uint8(numWit)
	return &desc
}
// getFee returns the fee for a transaction: the inputs' total value
// minus the outputs' total value, once the UTXO set can be queried.
// TODO: need blockchain package complete for fee calculation
// HMM: Could also put the function in the config
func getFee(in []*transaction.Input, out []*transaction.Output) uint64 {
	// Stubbed at zero until the UTXO set lookup exists; this means
	// every tx is currently treated as fee-less.
	return 0
}

View file

@ -1,67 +0,0 @@
package peermanager
import (
"errors"
"fmt"
"github.com/CityOfZion/neo-go/pkg/peer"
"github.com/CityOfZion/neo-go/pkg/wire/util"
)
// NOTE: This package may be removed in the future
// and so full functionality is not yet implemented, see Issue #33 for more details.
// PeerMgr will act as a convenience Mgr.
// It will be notified of added Peers.
// It will take care of sending messages to the right peers. In this way, it acts as a load balancer:
// if we send a getdata to one peer, it will be smart and send it to another peer who is not as busy.
// Using a subscription model, the syncmanager/other modules can notify the peermgr when they have received data.
type PeerMgr struct {
	// peers holds every peer registered via AddPeer; requests are
	// currently always served by the first entry.
	peers []*peer.Peer
}
// New will create a new peer manager.
// As of now it just returns a peerMgr struct and so the New method is
// redundant. A config file will be passed as a parameter, if it is
// decided that we will use this.
func New() *PeerMgr {
	var mgr PeerMgr
	return &mgr
}
// Disconnect will close the connection on a peer and
// remove it from the list.
// TODO: remove from list once disconnected — the removal below is
// not yet implemented, only the peer's own Disconnect is called.
func (pm *PeerMgr) Disconnect(p *peer.Peer) {
	p.Disconnect()
	// Once disconnected, we remove it from the list
	// and look for more peers to connect to
}
// RequestHeaders will request the headers from the most available peer.
// As of now, it requests from the first peer in the list, TODO(Kev)
func (pm *PeerMgr) RequestHeaders(hash util.Uint256) (*peer.Peer, error) {
	if len(pm.peers) == 0 {
		return nil, errors.New("Peer manager currently has no peers")
	}
	target := pm.peers[0]
	return target, target.RequestHeaders(hash)
}
// RequestBlocks will request blocks from the most available peer.
// As of now, it requests from the first peer in the list, TODO(Kev)
func (pm *PeerMgr) RequestBlocks(hash []util.Uint256) (*peer.Peer, error) {
	if len(pm.peers) == 0 {
		return nil, errors.New("Peer manager currently has no peers")
	}
	target := pm.peers[0]
	return target, target.RequestBlocks(hash)
}
// AddPeer registers a new peer for the PeerManager to use.
// It always succeeds; the error return is kept for future validation.
func (pm *PeerMgr) AddPeer(p *peer.Peer) error {
	// The original left a debug fmt.Println here; removed — library
	// code should not write to stdout on every registration.
	pm.peers = append(pm.peers, p)
	return nil
}

View file

@ -1,11 +0,0 @@
package syncmanager
import (
"github.com/CityOfZion/neo-go/pkg/blockchain"
"github.com/CityOfZion/neo-go/pkg/wire/util"
)
// Config supplies the syncmanager's dependencies: the chain being
// synchronised and the best known header hash to start from.
type Config struct {
	Chain *blockchain.Chain
	BestHash util.Uint256
}

View file

@ -1,152 +0,0 @@
// The syncmanager will use a modified verison of the initial block download in bitcoin
// Seen here: https://en.bitcoinwiki.org/wiki/Bitcoin_Core_0.11_(ch_5):_Initial_Block_Download
// MovingWindow is a desired featured from the original codebase
package syncmanager
import (
"fmt"
"github.com/CityOfZion/neo-go/pkg/peermanager"
"github.com/CityOfZion/neo-go/pkg/blockchain"
"github.com/CityOfZion/neo-go/pkg/peer"
"github.com/CityOfZion/neo-go/pkg/wire/payload"
"github.com/CityOfZion/neo-go/pkg/wire/util"
)
var (
	// This is the maximum amount of inflight objects that we would like to have.
	// Number taken from original codebase.
	maxBlockRequest = 1024

	// This is the maximum amount of blocks that we will ask for from a single peer.
	// Number taken from original codebase.
	maxBlockRequestPerPeer = 16
)
// Syncmanager drives the initial block download: headers first,
// then blocks, then steady-state maintenance.
type Syncmanager struct {
	pmgr *peermanager.PeerMgr
	Mode int // 1 = headersFirst, 2 = Blocks, 3 = Maintain
	chain *blockchain.Chain
	// headers holds hashes of validated headers whose blocks have not
	// been requested yet.
	headers []util.Uint256
	inflightBlockReqs map[util.Uint256]*peer.Peer // when we send a req for block, we will put hash in here, along with peer who we requested it from
}
// New will setup the syncmanager with the required parameters.
func New(cfg Config) *Syncmanager {
	return &Syncmanager{
		pmgr:              peermanager.New(),
		Mode:              1, // start in headersFirst mode
		chain:             cfg.Chain,
		headers:           []util.Uint256{},
		inflightBlockReqs: make(map[util.Uint256]*peer.Peer, 2000),
	}
}
// AddPeer registers a peer with the underlying peer manager.
func (s *Syncmanager) AddPeer(peer *peer.Peer) error {
	return s.pmgr.AddPeer(peer)
}
// OnHeaders is called when a headers message arrives from a peer.
// Only headersFirst mode (Mode == 1) is handled here; other modes
// ignore the message.
func (s *Syncmanager) OnHeaders(p *peer.Peer, msg *payload.HeadersMessage) {
	fmt.Println("Sync manager On Headers called")
	if s.Mode != 1 {
		return
	}
	if err := s.HeadersFirstMode(p, msg); err != nil {
		// We should custom-name errors so we can react to WrongHash
		// or peer-disconnect errors specifically.
		fmt.Println("Error re blocks", err)
	}
}
// HeadersFirstMode validates and stores a batch of headers. On a full
// 2000-header batch it switches the syncmanager to block-download
// mode and requests the first blocks; on a partial batch it asks for
// more headers starting after the last one received.
func (s *Syncmanager) HeadersFirstMode(p *peer.Peer, msg *payload.HeadersMessage) error {
	fmt.Println("Headers first mode")
	// Validate Headers; on failure drop this peer so the same range
	// can be re-requested from a different one.
	err := s.chain.ValidateHeaders(msg)
	if err != nil {
		// Re-request headers from a different peer
		s.pmgr.Disconnect(p)
		fmt.Println("Error Validating headers", err)
		return err
	}
	// Add Headers into db
	err = s.chain.AddHeaders(msg)
	if err != nil {
		// Try addding them into the db again?
		// Since this is simply a db insert, any problems here means trouble
		//TODO(KEV) : Should we Switch off system or warn the user that the system is corrupted?
		fmt.Println("Error Adding headers", err)
		//TODO: Batching is not yet implemented,
		// So here we would need to remove headers which have been added
		// from the slice
		return err
	}
	// Collect the batch's hashes so the block-download phase knows
	// which blocks to fetch, then queue them.
	var hashes []util.Uint256
	for _, header := range msg.Headers {
		hashes = append(hashes, header.Hash)
	}
	s.headers = append(s.headers, hashes...)
	if len(msg.Headers) == 2*1e3 { // should be less than 2000, leave it as this for tests
		fmt.Println("Switching to BlocksOnly Mode")
		s.Mode = 2 // switch to BlocksOnly. XXX: because HeadersFirst is not in parallel, no race condition here.
		return s.RequestMoreBlocks()
	}
	// Partial batch: keep requesting headers after the last one seen.
	lastHeader := msg.Headers[len(msg.Headers)-1]
	_, err = s.pmgr.RequestHeaders(lastHeader.Hash)
	return err
}
// RequestMoreBlocks asks a peer for the next batch of blocks, at most
// maxBlockRequestPerPeer per call, and records them as inflight.
func (s *Syncmanager) RequestMoreBlocks() error {
	// Take at most maxBlockRequestPerPeer hashes off the front of the
	// pending-header queue (the original if/else computed the same
	// slice in both branches).
	reqAmount := len(s.headers)
	if reqAmount > maxBlockRequestPerPeer {
		reqAmount = maxBlockRequestPerPeer
	}
	blockReq := s.headers[:reqAmount]

	peer, err := s.pmgr.RequestBlocks(blockReq)
	if err != nil { // This could happen if the peermanager has no valid peers to connect to. We should wait a bit and re-request
		return err // alternatively we could make RequestBlocks blocking, then make sure it is not triggered when a block is received
	}

	//XXX: Possible race condition, between us requesting the block and adding it to
	// the inflight block map? Give that node a medal.

	// Only the hashes actually requested are inflight. The original
	// ranged over all of s.headers, marking blocks that were never
	// requested as inflight.
	for _, hash := range blockReq {
		s.inflightBlockReqs[hash] = peer
	}
	s.headers = s.headers[reqAmount:]
	// NONONO: Here we do not pass all of the hashes to peermanager because
	// it is not the peermanagers responsibility to manage inflight blocks
	return nil
}
// OnBlock receives a block from a peer and passes it to the
// blockchain to process. Only this simple setup is used for now, to
// allow testing the other parts of the system. See Issue #24.
func (s *Syncmanager) OnBlock(p *peer.Peer, msg *payload.BlockMessage) {
	if err := s.chain.AddBlock(msg); err != nil {
		// Put headers back in front of queue to fetch block for.
		fmt.Println("Block had an error", err)
	}
}

178
server.go
View file

@ -1,178 +0,0 @@
package main
import (
"errors"
"fmt"
"net"
"github.com/CityOfZion/neo-go/pkg/blockchain"
"github.com/CityOfZion/neo-go/pkg/database"
"github.com/CityOfZion/neo-go/pkg/syncmanager"
"github.com/CityOfZion/neo-go/pkg/connmgr"
"github.com/CityOfZion/neo-go/pkg/peer"
"github.com/CityOfZion/neo-go/pkg/wire/payload"
"github.com/CityOfZion/neo-go/pkg/wire/protocol"
"github.com/CityOfZion/neo-go/pkg/wire/util"
"github.com/CityOfZion/neo-go/pkg/wire/util/io"
)
// This file will act as a stub server.
// Will create a server package.

// Server wires together the blockchain, database, sync manager,
// connection manager and peer configuration for a running node.
type Server struct {
	chain *blockchain.Chain
	db *database.LDB // TODO(Kev) change to database.Database
	sm *syncmanager.Syncmanager
	cm *connmgr.Connmgr
	peercfg peer.LocalConfig
	// latestHash is the most recent header hash loaded from the db
	// during setupChain.
	latestHash util.Uint256
}
// setupConnMgr creates the connection manager, wiring OnConn as the
// callback for successful connections.
func (s *Server) setupConnMgr() error {
	cfg := connmgr.Config{
		GetAddress:   nil,
		OnConnection: s.OnConn,
		OnAccept:     nil,
		Port:         "10333",
	}
	s.cm = connmgr.New(cfg)
	return nil
}
// setupDatabase opens the node's LevelDB-backed database.
// NOTE(review): the "test" path is hard-coded — make it configurable.
func (s *Server) setupDatabase() error {
	s.db = database.New("test")
	return nil
}
// setupChain initialises the blockchain and loads the latest header
// hash from the database into s.latestHash.
func (s *Server) setupChain() error {
	s.chain = blockchain.New(s.db, protocol.MainNet)
	if s.chain == nil {
		return errors.New("Failed to add genesis block")
	}

	table := database.NewTable(s.db, database.HEADER)
	resa, err := table.Get(database.LATESTHEADER)
	if err != nil {
		// The original overwrote this error before checking it,
		// masking a failed database read.
		return errors.New("Failed to get LastHeader " + err.Error())
	}
	s.latestHash, err = util.Uint256DecodeBytes(resa)
	if err != nil {
		return errors.New("Failed to get LastHeader " + err.Error())
	}
	return nil
}
// setupSyncManager creates the sync manager from the chain and the
// latest known header hash.
func (s *Server) setupSyncManager() error {
	cfg := syncmanager.Config{
		Chain:    s.chain,
		BestHash: s.latestHash,
	}
	s.sm = syncmanager.New(cfg)
	return nil
}
// setupPeerConfig builds the local peer configuration, routing header
// and block messages to the sync manager. Must run after
// setupSyncManager, since it captures s.sm's handlers.
func (s *Server) setupPeerConfig() error {
	s.peercfg = peer.LocalConfig{
		Net: protocol.MainNet,
		UserAgent: "DIG",
		Services: protocol.NodePeerService,
		Nonce: 1200,
		ProtocolVer: 0,
		Relay: false,
		Port: 10332,
		// StartHeight is a function so the height is read lazily.
		StartHeight: LocalHeight,
		OnHeader: s.sm.OnHeaders,
		OnBlock: s.sm.OnBlock,
	}
	return nil
}
// Run starts the connection manager and connects to the initial
// hard-coded seed node.
func (s *Server) Run() error {
	s.cm.Run()
	seed := &connmgr.Request{Addr: "seed1.ngd.network:10333"}
	return s.cm.Connect(seed)
}
// main is the node entry point; all wiring happens in setup.
func main() {
	setup()
}
// setup wires the server modules together in dependency order and
// starts the node, blocking forever once running.
func setup() {
	server := Server{}
	fmt.Println(server.sm)

	// Each stage depends on the previous one, so bail out on the
	// first failure. The original reassigned err on every call and
	// only checked the last one, silently discarding setup errors.
	steps := []func() error{
		server.setupConnMgr,
		server.setupDatabase,
		server.setupChain,
		server.setupSyncManager,
		server.setupPeerConfig,
	}
	for _, step := range steps {
		if err := step(); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println(server.sm)

	if err := server.Run(); err != nil {
		fmt.Println(err)
	}
	// Block forever; the node runs in background goroutines.
	<-make(chan struct{})
}
// OnHeader appends each received header hash to headers.txt and, when
// a full batch of 2000 headers arrives (meaning the tip has NOT been
// reached yet), requests the next batch from the peer.
func OnHeader(peer *peer.Peer, msg *payload.HeadersMessage) {
	for _, header := range msg.Headers {
		if err := fileutils.UpdateFile("headers.txt", []byte(header.Hash.String())); err != nil {
			fmt.Println("Error writing headers to file")
			// Stop writing on the first failure; remaining hashes
			// in this batch are dropped.
			break
		}
	}
	// A full 2000-header batch implies more headers exist, so ask for
	// the range starting after the last one.
	// NOTE(review): the hash is reversed here but not in the sync
	// manager's equivalent request — confirm which form the peer expects.
	if len(msg.Headers) == 2000 {
		lastHeader := msg.Headers[len(msg.Headers)-1]
		fmt.Println("Latest hash is", lastHeader.Hash.String())
		fmt.Println("Latest Header height is", lastHeader.Index)
		err := peer.RequestHeaders(lastHeader.Hash.Reverse())
		if err != nil {
			fmt.Println("Error getting more headers", err)
		}
	}
}
// LocalHeight returns this node's current block height.
// NOTE(review): hard-coded stub value for testing — replace with a
// real chain-height lookup.
func LocalHeight() uint32 {
	return 10
}
// OnConn is called when a successful connection has been made. It
// wraps the connection in a peer, starts it, registers it with the
// sync manager and kicks off an initial header request.
func (s *Server) OnConn(conn net.Conn, addr string) {
	fmt.Println(conn.RemoteAddr().String())
	fmt.Println(addr)

	p := peer.NewPeer(conn, false, s.peercfg)
	if err := p.Run(); err != nil {
		// The original carried on and requested headers from a peer
		// that never started; bail out instead.
		fmt.Println("Error running peer" + err.Error())
		return
	}
	s.sm.AddPeer(p)

	// This is here just to quickly test the system.
	fmt.Println("For tests, we are only fetching first 2k batch")
	if err := p.RequestHeaders(s.latestHash); err != nil {
		fmt.Println(err.Error())
	}
}