forked from TrueCloudLab/neoneo-go
Compare commits
422 commits
master
...
master-2.x
Author | SHA1 | Date | |
---|---|---|---|
|
fae1d562e7 | ||
|
ef282e6cb7 | ||
|
673ea0db87 | ||
|
82ecf8c4c9 | ||
|
56c20ed817 | ||
|
233aa94bba | ||
|
580f1787b9 | ||
|
29fec1c2ee | ||
|
554dd75a9a | ||
|
7588bf75c2 | ||
|
f38d62ab14 | ||
|
92e3474f6e | ||
|
b3a0a8f115 | ||
|
c3175112fe | ||
|
5d86176ec1 | ||
|
cb76757e6b | ||
|
0888dda6a2 | ||
|
38d14d4e3a | ||
|
9d6439bbe6 | ||
|
46a59e904a | ||
|
33db3f76c0 | ||
|
36938462f1 | ||
|
bcc2d1bb8f | ||
|
d8c8593410 | ||
|
1202494479 | ||
|
663f10192f | ||
|
4a2bdf5ddc | ||
|
483fefbb62 | ||
|
259522abde | ||
|
39a2360448 | ||
|
3f50f90dc5 | ||
|
1179fdb44d | ||
|
98580ae9b4 | ||
|
7fc4f3c4ea | ||
|
9d6a5dc26f | ||
|
8571107741 | ||
|
eb1986d2fc | ||
|
38842531ca | ||
|
d4f26fe473 | ||
|
07c91ea22d | ||
|
1ef91fa409 | ||
|
850d29060a | ||
|
70a20ce031 | ||
|
5ff57e890b | ||
|
d1e02c393d | ||
|
62a11807a8 | ||
|
feb6ba2ef7 | ||
|
a46c93f6bb | ||
|
fda8b784c8 | ||
|
9c34dea296 | ||
|
c3b6405f0f | ||
|
90a06ed7a5 | ||
|
9571ecffac | ||
|
89fb02a7f8 | ||
|
17c010e07d | ||
|
9e35758653 | ||
|
ee45d7739f | ||
|
363c24d128 | ||
|
9756ed2b06 | ||
|
edf587bbf1 | ||
|
0d2bc8f4a6 | ||
|
0510d1e6c1 | ||
|
5d6065c3ee | ||
|
4a8259caea | ||
|
124c674b17 | ||
|
50f477b2af | ||
|
66a64dd4c9 | ||
|
0333107327 | ||
|
75c4251a06 | ||
|
73bb2c2543 | ||
|
64d0876fc0 | ||
|
0ffffb93d7 | ||
|
3a1b67e9f7 | ||
|
d93ddfda10 | ||
|
06f3c34981 | ||
|
f3abbf34e3 | ||
|
df1792c80b | ||
|
49d176010e | ||
|
ef3eb0a842 | ||
|
559024671a | ||
|
68fc8168ec | ||
|
136e4b5886 | ||
|
52135dcade | ||
|
d775f07a55 | ||
|
77ecdcabb4 | ||
|
14c39b2b26 | ||
|
d2ee2b5f9f | ||
|
78773df6ec | ||
|
b36371ed94 | ||
|
50d6cd6b0d | ||
|
168366f33e | ||
|
46d314fb4d | ||
|
ca0ff47c05 | ||
|
4ff5bb361a | ||
|
c83cefecc6 | ||
|
e3c360c477 | ||
|
6e5fd359ae | ||
|
9fb749ff68 | ||
|
b8705bdb79 | ||
|
08782e265b | ||
|
ec631984d0 | ||
|
35c09a2d37 | ||
|
3efc373e8a | ||
|
7a976d47f3 | ||
|
67ee4ed0a6 | ||
|
615ae1b3aa | ||
|
2e0e5cff25 | ||
|
26d4a05e57 | ||
|
23719f7e72 | ||
|
13f29805bb | ||
|
916603d495 | ||
|
5703c4859b | ||
|
2f436eee4f | ||
|
4f7fa732cf | ||
|
010c22e2b5 | ||
|
56d57611ca | ||
|
0ece58e6dd | ||
|
a1357789cf | ||
|
e14ba6c855 | ||
|
c4d287f326 | ||
|
0ef65d1bb9 | ||
|
8865d5b2c5 | ||
|
319e3996f4 | ||
|
5b05081525 | ||
|
b310ac051b | ||
|
d3e415d3bd | ||
|
6761efff24 | ||
|
75ed6c8c08 | ||
|
9aee3e5a34 | ||
|
04ebef9119 | ||
|
39897e811d | ||
|
1ffc316cc1 | ||
|
8ff2d35723 | ||
|
c4057b8906 | ||
|
cf48e82242 | ||
|
e8fd6fde99 | ||
|
43f986d24d | ||
|
c86522719a | ||
|
25307834ab | ||
|
ee61120e13 | ||
|
5f46aa096f | ||
|
56b2a16389 | ||
|
43eb670d45 | ||
|
b8ec1b1d93 | ||
|
dfa53c635d | ||
|
d8aac7c675 | ||
|
47ed2eda43 | ||
|
8850ecd4ca | ||
|
92f8cee6cd | ||
|
ab7a69bfec | ||
|
253c39d4ee | ||
|
ffbdcb202f | ||
|
57a325b3d5 | ||
|
ebcec6e5dc | ||
|
4b351f3123 | ||
|
d20e54b725 | ||
|
4715efd531 | ||
|
92851aa8e4 | ||
|
f1ac01578b | ||
|
010d55e92f | ||
|
bb4385ca50 | ||
|
db98f8f30b | ||
|
02033f8355 | ||
|
fe5e5ff44a | ||
|
9aa7b7fc97 | ||
|
edeb9a3d2e | ||
|
7bd4488ff9 | ||
|
022fb04077 | ||
|
407e348cd5 | ||
|
ab5eff620b | ||
|
50e5e6fe29 | ||
|
e4fcd90b6d | ||
|
49f9c4ad7e | ||
|
e135719c38 | ||
|
c4c9f9225c | ||
|
c6d33c5841 | ||
|
a702423c4a | ||
|
f0d75afc48 | ||
|
7cd1bca1e1 | ||
|
7bdbfbad19 | ||
|
8341913468 | ||
|
7d5d6b620e | ||
|
692565c5a2 | ||
|
6d32751292 | ||
|
dbbe57d11a | ||
|
35c60cd8f4 | ||
|
3d62db4f34 | ||
|
38c195fea9 | ||
|
4d0f4d3e51 | ||
|
e375267422 | ||
|
4374e98f15 | ||
|
bf3c29c319 | ||
|
1ab4f81fc3 | ||
|
fd778e0250 | ||
|
dcb00c61f2 | ||
|
59cd19f5d4 | ||
|
78900148ee | ||
|
5ec96b4ccc | ||
|
25b742d24e | ||
|
cefad683e3 | ||
|
c6894bfb3f | ||
|
bc3f17a890 | ||
|
7e5533e77e | ||
|
8f1ddc0651 | ||
|
c489f975d4 | ||
|
a7e8f07073 | ||
|
d12258e79c | ||
|
eb37f92881 | ||
|
e97c153a26 | ||
|
654877fb1b | ||
|
b517aa43db | ||
|
d8dddabc86 | ||
|
b4b0ae0f51 | ||
|
45a95b5242 | ||
|
d8556b80bc | ||
|
3ccf19fc1e | ||
|
2b21102c14 | ||
|
fbadb317f5 | ||
|
25fdc62203 | ||
|
ea17793aee | ||
|
6d5e70163d | ||
|
03977083c0 | ||
|
cce4380eee | ||
|
86110445a6 | ||
|
9ee9cb8e39 | ||
|
b5fb63d091 | ||
|
d128b55dbf | ||
|
c06b3b669d | ||
|
bc81b56708 | ||
|
2cddeadd9a | ||
|
423495ecc3 | ||
|
2863343f03 | ||
|
3a6186af54 | ||
|
fec8916cb9 | ||
|
ad0cf146e8 | ||
|
511d18d409 | ||
|
9767817ee4 | ||
|
aebed3826b | ||
|
caf53740d3 | ||
|
e2e1bd09ae | ||
|
f8051da0bd | ||
|
f665843887 | ||
|
9615e83c00 | ||
|
cc5b5bff2e | ||
|
9d6b0ee4a8 | ||
|
bf6aa02dcf | ||
|
d5ba2e49a2 | ||
|
8614867439 | ||
|
4732103294 | ||
|
7d786fac79 | ||
|
a6541c4514 | ||
|
c13ca8d597 | ||
|
37173bcf22 | ||
|
788304ec49 | ||
|
edd60e656e | ||
|
76f71ab1ef | ||
|
1fd7938fd8 | ||
|
519a98039c | ||
|
8cbbddddaf | ||
|
fe8038e8b7 | ||
|
53dc7f27b6 | ||
|
dcaa82b32b | ||
|
349d8a1984 | ||
|
6437f4b32e | ||
|
9f7f94a6fb | ||
|
8e785feeb6 | ||
|
4f1cf07075 | ||
|
7b1a54c934 | ||
|
44f93c7c69 | ||
|
0b87a68210 | ||
|
a1c4d7ce26 | ||
|
e3af560d11 | ||
|
69ccca675d | ||
|
685d3eb870 | ||
|
f77c239296 | ||
|
5794bdb169 | ||
|
a6ffa90ceb | ||
|
24785f1f50 | ||
|
baafa30266 | ||
|
6c06bc57cc | ||
|
10189b6ab3 | ||
|
edcfdb3bde | ||
|
20f190ef69 | ||
|
7d24fc5500 | ||
|
029fecbb71 | ||
|
1b6aee42d5 | ||
|
fd73310a1c | ||
|
e4f413aa29 | ||
|
2f90a06db3 | ||
|
314430be1d | ||
|
103c45850a | ||
|
9c478378e1 | ||
|
31d9aeddd2 | ||
|
861a1638e8 | ||
|
806b28aab7 | ||
|
9a0d7d3254 | ||
|
0f6d01faa1 | ||
|
90536d533d | ||
|
06bed2b4bf | ||
|
0da1935ebb | ||
|
b0d07c3031 | ||
|
503442a60d | ||
|
776bd85ded | ||
|
79c87ca8a5 | ||
|
78bd01db57 | ||
|
7a783b64d7 | ||
|
86ce234d5f | ||
|
ff0241ed07 | ||
|
7431263903 | ||
|
fe2e501488 | ||
|
3675502927 | ||
|
eeeb05fc2c | ||
|
8ed77f0d25 | ||
|
fb9b2ae982 | ||
|
4fda492873 | ||
|
f8b41e486c | ||
|
a8fa68914a | ||
|
e07fd4f9f0 | ||
|
61c595909a | ||
|
cd8445aa59 | ||
|
60bca03577 | ||
|
0ce3a12e87 | ||
|
d0853c0471 | ||
|
1721360dd1 | ||
|
328c6d7ce5 | ||
|
318ca55982 | ||
|
5ec70b9fc2 | ||
|
b96fe8173c | ||
|
03cc8c118f | ||
|
f0d6b0a639 | ||
|
ddbc9057c8 | ||
|
94d6b5466f | ||
|
0079dfb695 | ||
|
2c921f5277 | ||
|
8c19b8f2a1 | ||
|
085d50b430 | ||
|
ec2bf7d52e | ||
|
a587274351 | ||
|
514f862b81 | ||
|
d0a3ce25ff | ||
|
07742bdf46 | ||
|
dff0f724cd | ||
|
eb82661f6b | ||
|
a17bd6176f | ||
|
31d7b07eb5 | ||
|
6c0553da47 | ||
|
d5d497ccd7 | ||
|
8fb4bbca4e | ||
|
5cebd4a7a2 | ||
|
78b2387640 | ||
|
10abac4362 | ||
|
f0047b4055 | ||
|
a43f2234dd | ||
|
541d56a874 | ||
|
5d941364bd | ||
|
80b8b50f02 | ||
|
9e94895bb0 | ||
|
d09c3b1e27 | ||
|
7abd35b358 | ||
|
77c5f28b09 | ||
|
f69e654260 | ||
|
cbcc8e160f | ||
|
231a5189a2 | ||
|
9c46e79745 | ||
|
83df376d17 | ||
|
6dff01672c | ||
|
bd51fe4c11 | ||
|
8db714785d | ||
|
f0abbfd399 | ||
|
8cd7bc7e07 | ||
|
9546e021a9 | ||
|
83febead59 | ||
|
8f55f0ac76 | ||
|
78716c5335 | ||
|
da32cff313 | ||
|
966ff28091 | ||
|
1d5734c882 | ||
|
9454ef5c28 | ||
|
5464f9c3ae | ||
|
e1408b6525 | ||
|
29ada4ca46 | ||
|
d5df1212c2 | ||
|
d686fe4e5d | ||
|
dd8bcfae47 | ||
|
1ac4f8528d | ||
|
2e58a14978 | ||
|
c85e77e1c9 | ||
|
bb481e9712 | ||
|
3db14b4699 | ||
|
943d435cd2 | ||
|
a1d8206ef7 | ||
|
37813f1020 | ||
|
91a3b655b7 | ||
|
922aff1fcc | ||
|
b2a3cee451 | ||
|
2a6be8ceef | ||
|
e097e86bfa | ||
|
a025838e52 | ||
|
99d0bafa2c | ||
|
592fd068b1 | ||
|
556ab39a5a | ||
|
6333060897 | ||
|
38f9b511ae | ||
|
e32367a1c2 | ||
|
a49f2bc47c | ||
|
00d6439eb2 | ||
|
24b46c6041 | ||
|
3eb6d266ca | ||
|
3d2a2af575 | ||
|
dbe7be5b00 | ||
|
21c31d7d24 | ||
|
87b0e76a8c | ||
|
8f417c8e8b | ||
|
92a99624c4 | ||
|
24675d0688 | ||
|
c813873577 | ||
|
84edcf0b0e | ||
|
2b943b76ae | ||
|
71fb153a69 | ||
|
51512c73ed | ||
|
4b5cc0b460 | ||
|
1433c795c2 |
189 changed files with 13511 additions and 2705 deletions
|
@ -3,11 +3,6 @@ orbs:
|
|||
codecov: codecov/codecov@1.0.5
|
||||
|
||||
executors:
|
||||
go1_12:
|
||||
docker:
|
||||
- image: circleci/golang:1.12
|
||||
environment:
|
||||
GO111MODULE: "on"
|
||||
go1_13:
|
||||
docker:
|
||||
- image: circleci/golang:1.13
|
||||
|
@ -54,16 +49,6 @@ jobs:
|
|||
name: go-vet
|
||||
command: go vet ./...
|
||||
|
||||
test_1_12:
|
||||
working_directory: /go/src/github.com/nspcc-dev/neo-go
|
||||
executor: go1_12
|
||||
steps:
|
||||
- checkout
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
- gomod
|
||||
- run: go test -v -race ./...
|
||||
|
||||
test_1_13:
|
||||
working_directory: /go/src/github.com/nspcc-dev/neo-go
|
||||
executor: go1_13
|
||||
|
@ -88,7 +73,7 @@ jobs:
|
|||
|
||||
build_cli:
|
||||
working_directory: /go/src/github.com/nspcc-dev/neo-go
|
||||
executor: go1_12
|
||||
executor: go1_14
|
||||
steps:
|
||||
- checkout
|
||||
- gomod
|
||||
|
@ -99,7 +84,7 @@ jobs:
|
|||
|
||||
build_image:
|
||||
working_directory: /go/src/github.com/nspcc-dev/neo-go
|
||||
executor: go1_12
|
||||
executor: go1_14
|
||||
docker:
|
||||
- image: golang:1-alpine
|
||||
steps:
|
||||
|
@ -130,10 +115,6 @@ workflows:
|
|||
filters:
|
||||
tags:
|
||||
only: v/[0-9]+\.[0-9]+\.[0-9]+/
|
||||
- test_1_12:
|
||||
filters:
|
||||
tags:
|
||||
only: v/[0-9]+\.[0-9]+\.[0-9]+/
|
||||
- test_1_13:
|
||||
filters:
|
||||
tags:
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
BIN=/usr/bin/neo-go
|
||||
|
||||
if [ -z "$ACC"]; then
|
||||
if [ -z "$ACC" ]; then
|
||||
ACC=/6000-privnet-blocks.acc.gz
|
||||
fi
|
||||
|
||||
|
|
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -31,8 +31,6 @@ TAGS
|
|||
# leveldb
|
||||
chains/
|
||||
chain/
|
||||
blockchain/
|
||||
blockchains/
|
||||
|
||||
# patch
|
||||
*.orig
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.14.x
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
install:
|
||||
|
|
444
CHANGELOG.md
444
CHANGELOG.md
|
@ -2,6 +2,450 @@
|
|||
|
||||
This document outlines major changes between releases.
|
||||
|
||||
## 0.78.4 "Cementation" (12 Aug 2021)
|
||||
|
||||
Neo Legacy is still there and we still support our node for it. This update
|
||||
makes NeoGo compatible with awaited GAS generation cut off on mainnet planned
|
||||
for block 8000000. It also fixes some bugs along the way. DB format isn't
|
||||
changed, so if you're updating before block 8M you can reuse old database, but
|
||||
you need to update configuration to add NoBonusHeight parameter (see our
|
||||
default configuration files).
|
||||
|
||||
New features:
|
||||
* system GAS can now be added to invocations via CLI (#1783)
|
||||
* NoBonusHeight configuration parameter to turn GAS generation off at some
|
||||
height (#2107)
|
||||
|
||||
Bugs fixed:
|
||||
* invalid transfer event could cause node to panic (#1901)
|
||||
* incorrect MPT initialization could lead to block rejection if node stopped
|
||||
synchronization before any MPT change (#1910)
|
||||
* duplicate connections to peers (#2020)
|
||||
* infinite loop attempting to connect to other nodes in some cases (#2020)
|
||||
* ping messages sent with wrong height and nonce (#2116)
|
||||
* node could lose some data on forced stop in some cases (#2126)
|
||||
|
||||
## 0.78.3 "Calcification" (22 February 2021)
|
||||
|
||||
We're updating NeoGo node for Neo 2 network with this release bringing a
|
||||
number of fixes for various problems. Some were found during our internal RPC
|
||||
compatibility audit and some are just backports from Neo 3 development
|
||||
branch. There are no new features introduced, but it's an important
|
||||
upgrade. Unfortunately we had to change the DB format to fix some problems, so
|
||||
add some time for resynchronization to your node maintenance plan.
|
||||
|
||||
Improvements:
|
||||
* RPC documentation was updated mentioning all known differences with C#
|
||||
implementation that are considered to be not worth fixing (#1760)
|
||||
|
||||
Bugs fixed:
|
||||
* when mempool is full new transaction's hash could still be added into it
|
||||
even if transaction is to be rejected afterwards (#1623)
|
||||
* `gettxout` RPC API was returning data for already spent outputs, although it
|
||||
shouldn't (C# node doesn't do that, #1649)
|
||||
* `getrawtransaction` RPC call returned metadata for pooled (not yet accepted)
|
||||
transactions (#1648)
|
||||
* `gettransactionheight` RPC call returned zero height for pooled (not yet
|
||||
accepted) transactions (#1648)
|
||||
* VM CLI set breakpoints after target instructions, although it should set
|
||||
them before execution (#1648)
|
||||
* a peer could be disconnected and not reconnected again properly in some
|
||||
cases (#1648)
|
||||
* missing write timeout could lead to broadcasting stalls (#1648)
|
||||
* CN transaction requests could try to ask for more transactions from peers
|
||||
than it is allowed to by the protocol (#1648)
|
||||
* CN transaction requests could be trimmed unnecessarily (#1648)
|
||||
* FP Fixed8 values (GAS usually) could be unmarshalled from JSON/YAML
|
||||
incorrectly in some cases (#1761)
|
||||
* previous proposal reuse could lead to empty blocks accepted even if there
|
||||
are transactions in the mempool (#1761)
|
||||
* UTXO asset issuer wasn't properly stored by the node (#1760)
|
||||
* `getassetstate` RPC call answer used wrong field name for "frozen" data
|
||||
(#1760)
|
||||
* UTXO asset type was serialized as numeric code instead of a string in
|
||||
`getrawtransaction` and getassetstate RPC call results (#1760)
|
||||
* `getblockheader` RPC call didn't support block numbers in parameters (#1760)
|
||||
* `getblocksystemfee` RPC call returned block's system fee while it should
|
||||
return cumulative system fee from genesis block to the one requested
|
||||
(#1760)
|
||||
* system fee calculation was wrong for some types of transactions (#1760)
|
||||
* `gettxout` RPC call implementation was returning asset hash in wrong byte
|
||||
order (#1760)
|
||||
* `getpeers` RPC call implementation returned port numbers wrapped into
|
||||
strings (#1760)
|
||||
* `getenrollments` RPC call implementation returned duplicate wrong records
|
||||
along with proper ones (#1760)
|
||||
* NEP5 tracking code wasn't deleting entries with zero balance leading to
|
||||
excessive getnep5balances request outputs for some addresses (#1760)
|
||||
* NEP5 tracking code wasn't able to handle tokens overflowing int64 in
|
||||
transfer and balance amounts (like pnWETH, #1760)
|
||||
* `confirmations` value in `getblock` RPC call response was shifted by two
|
||||
(#1760)
|
||||
* some fields with default values were omitted for `getrawtransaction` and
|
||||
`getblock` RPC call answers with Register and Publish transactions (#1760)
|
||||
|
||||
## 0.78.2 "Colorization" (03 December 2020)
|
||||
|
||||
Release 0.78.2 adds some minor features to the node while still being
|
||||
compatible both for protocol and for DB format.
|
||||
|
||||
New features:
|
||||
* `getblocktransfertx` RPC API to get list of asset-transferring transactions
|
||||
(both UTXO and NEP5) for a block, see docs/rpc.md for more details (#1516)
|
||||
* support for batched JSON-RPC requests (#1509)
|
||||
* `KeepOnlyLatestState` option to only track the latest MPT state and not store
|
||||
old trees, it's disabled by default (keeping old behavior) and can only be
|
||||
turned on for a new DB (resynchronizing state from the genesis block), but
|
||||
it allows to drastically reduce disk space requirements for mainnet node
|
||||
(and even make it work a bit faster while still having stateroot
|
||||
tracking). Nodes with stateroot disabled (`EnableStateRoot: false`) use ~53GB
|
||||
for DB at current 6.5M height, stateroot-enabled nodes (`EnableStateRoot:
|
||||
true`) require ~122GB, but with `KeepOnlyLatestState: true` setting (and
|
||||
`EnableStateRoot: true`) the DB size is ~56GB, so the difference is quite
|
||||
noticeable for disk-constrained setups.
|
||||
|
||||
Improvements:
|
||||
* mempooled transaction retransmission mechanism (#1532)
|
||||
|
||||
## 0.78.1 "Circumnavigation" (22 October 2020)
|
||||
|
||||
This is a bug fix release that solves DB access and seed reconnection
|
||||
problems. If you're using LevelDB or BadgerDB for backend storage you need to
|
||||
resynchronize your node for testnet and you're recommended to do that for
|
||||
mainnet. BoltDB users can proceed with the old DB.
|
||||
|
||||
Behavior changes:
|
||||
* default mainnet configuration now has updated values for FreeGas changes
|
||||
height (6195000 instead of 6200000, #1412)
|
||||
|
||||
Improvements:
|
||||
* dBFT timer was tuned to eliminate block processing delay from the
|
||||
inter-block interval, this only affects consensus nodes (#1429)
|
||||
|
||||
Bugs fixed:
|
||||
* node using LevelDB or BadgerDB for backend storage had incorrect state for
|
||||
testnet (#1468)
|
||||
* node that failed to connect to seeds on startup never attempted to
|
||||
reconnect to them again (#1483)
|
||||
* node using BoltDB could panic in various situations like processing a new
|
||||
block, this was noticed on the master branch with 3.0 support and never
|
||||
seen on 2.x, although theoretically it could still happen (#1482)
|
||||
|
||||
## 0.78.0 "Cardioacceleration" (15 September 2020)
|
||||
|
||||
This version is a Neo 2.12.0-compatible release that follows DeFi-related
|
||||
network changes while also introducing some new RPC functionality and fixing
|
||||
bugs.
|
||||
|
||||
There is an important configuration update there, please check FreeGasLimit,
|
||||
MaxFreeTransactionsPerBlock and MaxTransactionsPerBlock settings as they're
|
||||
now height-dependent (see the default mainnet and testnet configurations for
|
||||
example of how to configure them).
|
||||
|
||||
If you're using getutxotransfer calls, please resynchronize your node to fix
|
||||
NEO values returned.
|
||||
|
||||
New features:
|
||||
* minimum network fee policy value can now be configured, but it defaults to
|
||||
zero for now (#1397)
|
||||
* stateroot import/export (#1307)
|
||||
* new `getalltransfertx` RPC call returning all UTXO and NEP5 transactions
|
||||
for specified account (parameters are the same as for `getnep5transfers`,
|
||||
see RPC documentation for details, #1399, #1404)
|
||||
* GetUTXOTransfers support was added to RPC client (#1399)
|
||||
|
||||
Behavior changes:
|
||||
* FreeGasLimit is now height-dependent (configured as a map, instead of
|
||||
simple value), default value for mainnet since 6200000 and testnet since
|
||||
4840000 is 50.0 (#1394, #1397, #1400, #1402)
|
||||
* getnep5transfers and getutxotransfers RPC calls now support limiting and
|
||||
paging in addition to time span selection, by default both only return a
|
||||
1000 of elements (and it's the maximum possible) for a single call (#1395,
|
||||
#1399)
|
||||
* MaxTransactionsPerBlock and MaxFreeTransactionsPerBlock are
|
||||
height-dependent now, the default values for mainnet and testnet change
|
||||
since 6200000 and 4840000 respectively to 200 transactions per block with
|
||||
199 free ones
|
||||
* RPC client's GetNEP5Transfers call was updated to support new parameters
|
||||
(#1399)
|
||||
|
||||
Improvements:
|
||||
* the project is now using proper YAML library import path (#1306)
|
||||
* the node will exit with an error if no valid password is provided for CN
|
||||
wallet (#1316)
|
||||
|
||||
Bugs fixed:
|
||||
* state root addition errors in CN logs (harmless, but annoying, #1313)
|
||||
* state root was generated by CNs before StateRootEnableIndex (#1320)
|
||||
* panic on consensus watcher node (#1314)
|
||||
* RPC calls working with 256-bit hash parameters were not accepting hashes
|
||||
with "0x" prefix (#1369)
|
||||
* getutxotransfer RPC call returned NEO values multiplied by 10⁸ (#1386)
|
||||
* incorrect vout indexes in getblock or getrawtransaction RPC outputs (#1392)
|
||||
* network fee check was not accounting for LowPriorityThreshold correctly
|
||||
(#1397)
|
||||
|
||||
## 0.77.0 "Cornification" (11 August 2020)
|
||||
|
||||
This release is aligned with Neo 2.11.0 release, bringing important changes to
|
||||
the NeoX consensus protocol, some bug fixes and new RPC functionality. It is
|
||||
tested to work in consensus with 2.11.0. The DB format was changed for NEP5
|
||||
tracking data and we also track UTXO transfers now, so you need to
|
||||
resynchronize the DB from the genesis for this version to operate correctly.
|
||||
|
||||
New features:
|
||||
* `getutxotransfers` RPC call to track NEO and GAS movements. It accepts
|
||||
address as mandatory parameter, asset name ("gas" or "neo") and start/end
|
||||
time as optional parameters (similar to `getnep5transfers`). See #1268 and
|
||||
#1288.
|
||||
* `invoke*` RPC calls now support passing hashes to be used as verified ones
|
||||
during execution (for contract's `CheckWitness` calls), this functionality
|
||||
is also available in the CLI now (#1282)
|
||||
|
||||
Behavior changes:
|
||||
* `getnep5transfers` and `getnep5balance` now return raw balance values, the
|
||||
same way C# node does (previously the result was adjusted by contract's
|
||||
decimals value, #1250)
|
||||
* `getstateheight` RPC call response now uses lowercase field names (#1274)
|
||||
* `getnep5transfers` RPC call now accepts start/end optional time parameters
|
||||
(the same way C# plugin does), lacking start option the result might be
|
||||
different from the one returned by previous node versions (#1284)
|
||||
* we no longer support Go 1.12 to build the node (#1285)
|
||||
* NeoX consensus was reworked to exchange signatures during Prepare phase and
|
||||
emit stateroot message along with Commit (#1291). Note that this new
|
||||
consensus process is not compatible with 0.76.X versions of neo-go or
|
||||
2.10.3-neox-preview1 version of C# node, but it is compatible with 2.11.0,
|
||||
so you need to upgrade all of your consensus nodes for the network to run
|
||||
correctly. Also note that the default configuration for mainnet was changed
|
||||
to enable StateRoot functionality.
|
||||
|
||||
Bugs fixed:
|
||||
* `getnep5transfers` RPC call not returning results for migrated contracts
|
||||
(#1215)
|
||||
* incorrect consensus recovery message data for NeoX-enabled networks (#1270)
|
||||
|
||||
## 0.76.2 "Calibration" (19 July 2020)
|
||||
|
||||
Minor update for Neo 2. If you're running testnet node, we recommend to
|
||||
resynchronize it to fix state mismatch at block 4516236, mainnet is not known
|
||||
to have any similar problem so you can keep the old DB.
|
||||
|
||||
Bugs fixed:
|
||||
* rare panic on node shutdown (#1185)
|
||||
* VM not clearing alt stack when doing CALLs (#1158)
|
||||
* incorrect resource accounting for isolated calls (#1186)
|
||||
* state height wasn't updated properly for networks with StateRootEnableIndex
|
||||
higher than zero (like testnet, #1213)
|
||||
|
||||
## 0.76.1 "Conduplication" (15 July 2020)
|
||||
|
||||
Minor bug fixing for Neo 2 implementation. If you're affected by NEP5 balance
|
||||
tracking bug, please resynchronize your node to get correct results, other
|
||||
than that it's not required, the DB is fully compatible.
|
||||
|
||||
Bugs fixed:
|
||||
* block synchronization stalls on consensus node when it's not yet up to date
|
||||
and there are a lot of blocks to fetch (#1116)
|
||||
* getnep5balances and getnep5transfers RPC calls didn't support passing
|
||||
addresses as parameters (#1146)
|
||||
* NEP5 balance tracking was not aware of contract migrations leading to wrong
|
||||
data being returned for getnep5balances RPC call (#1144)
|
||||
|
||||
## 0.76.0 "Cross-pollination" (25 June 2020)
|
||||
|
||||
We wanted to make a 0.75.1-neox-preview1 release of neo-go compatible with
|
||||
version 2.10.3-neox-preview1 of C# node, but then suddenly decided to make
|
||||
something better than that and now release 0.76.0 instead which brings with
|
||||
it configurable cross-chain functionality support. So there is no need to use
|
||||
different node builds for different networks with NeoGo, one binary fits
|
||||
all. As usual some bugs were also fixed, this time mostly concentrating around
|
||||
consensus functionality.
|
||||
|
||||
New features:
|
||||
* cross-chain (aka neox) support, refer to docs/neox.md for details on what's
|
||||
included and how to use it
|
||||
* compiler built-in Remove function was added for slices/maps element
|
||||
deletion (#1021)
|
||||
|
||||
Behavior changes:
|
||||
* the default testnet config now enables state root with StateRootEnableIndex
|
||||
of 4380100, if you have a testnet node you need to either stay with the old
|
||||
configuration or remove the DB and resynchronize blocks from genesis
|
||||
* contracts using comparison with nil will fail to compile, this comparison
|
||||
actually never functioned correctly (#952)
|
||||
|
||||
Improvements:
|
||||
* storage cache flushing was optimized, improving block import speed by ~10%
|
||||
(#1014)
|
||||
* consensus process now logs a bit more relevant messages (#1041)
|
||||
* limits to invocation scripts were added to consensus messages (#1087)
|
||||
* transaction request retries were added in case consensus process is missing
|
||||
some transactions (#1095)
|
||||
|
||||
Bugs fixed:
|
||||
* dbft's failed nodes detection was tuned for the case of node lagging behind
|
||||
the network (#1009)
|
||||
* dbft was fixed to always process recovery messages for the current block
|
||||
(#1009)
|
||||
* compiler now initializes complex struct fields correctly (#952)
|
||||
* NOTEQUAL opcode is now only being emitted for integers by the compiler (#952)
|
||||
* typo in docker entrypoint script preventing proper bootstrap file passing
|
||||
(#1089)
|
||||
* MaxFreeTransactionsPerBlock limit wasn't really enforced (#1019)
|
||||
* changeview payloads were not processed correctly by the consensus subsystem
|
||||
(#1041)
|
||||
* dbft library wasn't including messages from higher views for last seen
|
||||
message checks (#1041)
|
||||
* recovery request sent instead of recovery message in one case (#1095)
|
||||
some correctness checks were not done for proposed block if not all
|
||||
transactions were present when the node received PrepareRequest (#1095)
|
||||
* timeout wasn't properly calculated in some cases for Primary node leading
|
||||
to early PrepareRequest send (#1095)
|
||||
|
||||
## 0.75.0 "Caramelization" (28 May 2020)
|
||||
|
||||
A long-awaited Neo 2.0 update for neo-go that fixes a lot of subtle little
|
||||
differences in VM and syscalls behavior compared to C# node that resulted in
|
||||
storage state mismatches between two nodes. This release makes neo-go fully
|
||||
compatible with public testnet and mainnet chains, for every transaction in
|
||||
every block you get the same result.
|
||||
|
||||
But it's not just about bugs, as it's been quite a long development cycle,
|
||||
we've also included some interesting new features like notification subsystem,
|
||||
neo-debugger compatibility and BadgerDB support. Smart contract compiler and
|
||||
interop packages were also updated making neo-go even better for developing
|
||||
real-world complex smart contracts.
|
||||
|
||||
New features:
|
||||
* support for `for` loops with no condition was added to the compiler (#799)
|
||||
* compiler can now emit debug information compatible with neo-debugger (#804,
|
||||
#829)
|
||||
* experimental BadgerDB support was added (#839)
|
||||
* support for abi.json files generation was added for contract deployment
|
||||
with NEO-Express (#916)
|
||||
* RPC over websocket connections is now supported both by the server (with
|
||||
ws://$SERVER:$PORT/ws URL) and client (WSClient structure, #921)
|
||||
* notification subsystem was added for RPC server and client (using websocket
|
||||
connections, #895)
|
||||
* interop package now has a complete set of syscalls available (#795, #956)
|
||||
* push command was added to VM CLI (#967)
|
||||
* diff dumps are now supported in `db restore` CLI command (#991)
|
||||
|
||||
Behavior changes:
|
||||
* due to DB format changes you'll need to resynchronize your node from
|
||||
scratch (and it'll also update its state to a more proper one)
|
||||
* runtime.Notify interop now accepts varargs (#825)
|
||||
* compiler's `--debug` parameter is now used for debug information generation,
|
||||
use `--verbose` to get more internal compiler's messages (#829)
|
||||
* compiler now outputs hex-encoded contract's bytecode only with `--verbose`
|
||||
option (#829, previously it was always printed)
|
||||
* RPC client's representation of GetBlock results changed significantly (#951)
|
||||
* some interop functions changed their in/out types to a more proper ones
|
||||
(#956, though previous ones didn't really work, so no current code should
|
||||
notice that)
|
||||
* `skip` parameter to `db restore` CLI command was replaced by `start`
|
||||
(#991), when using full (non-diff) NGD dumps with short (`-s`) form they're
|
||||
compatible, but `start` (as a block index) makes more sense for diff dumps
|
||||
|
||||
Improvements:
|
||||
* (*Stack).PushVal in vm now supports all types of integers (#776)
|
||||
* state.AppExecResult now stores stack in binary representation (#782)
|
||||
* vm.NewBigIntegerItem now accepts int64 parameter (#788)
|
||||
* RPC client tests were added (#716)
|
||||
* buffer reuse added for persisting temporary changes which improved block
|
||||
handling speed (#772)
|
||||
* significant RPC server package refactoring was done (#753)
|
||||
* BoltDB was updated to v1.3.4 (#793)
|
||||
* Go 1.14 is now supported (#775)
|
||||
* serialization/deserialization tests were unified (#802)
|
||||
* emit package now has Array and AppCallWithOperationAndArgs methods for
|
||||
easier script creation (#803)
|
||||
* persisting from MemCachedStore to MemCachedStore was optimized which
|
||||
allowed to gain 10% improvement in block import time for 1.5M mainnet
|
||||
blocks (#807)
|
||||
* storage.Find usage example was added (#795)
|
||||
* VM stack item tests were improved (#812)
|
||||
* `config` directory now only contains configuration files, no Go code (#423,
|
||||
#816)
|
||||
* local variables are counted more accurately now in the compiler leading to
|
||||
less waste in script runtime (#815)
|
||||
* NEP5 example was extended with minting function making it a bit more usable
|
||||
(#823)
|
||||
* DAO was refactored away into its own package from core (#832)
|
||||
* additional tests were added for bitwise and numeric VM operations (#833)
|
||||
* tests for VM CALL* instructions were added (#833)
|
||||
* consensus message parsing was split into two parts so that ordinary nodes
|
||||
won't touch CN's data (#862)
|
||||
* contract's metadata was moved into smartcontract package from rpc (#916)
|
||||
* interop packages documentation was extended (#956)
|
||||
* Docker build was fixed to use Makefile (#981)
|
||||
|
||||
Bugs fixed:
|
||||
* integer stack values were not following C# node format when being converted
|
||||
to JSON (#770)
|
||||
* vm now uses truncated division to match C# node behavior for negative
|
||||
integers (#773)
|
||||
* getapplicationlog and other RPC calls now convert Uint160 to JSON the same
|
||||
way C# node does (#769)
|
||||
* asset state JSON representation now follows the same format as C# node uses
|
||||
(with proper field names for 'id' and 'type' and without 'fee' add
|
||||
'address' fields, #785, #819)
|
||||
* `nextconsensus` field in `getblockheader` RPC call answer was using hex
|
||||
representation instead of address (#763)
|
||||
* `getcontractstate` RPC call implementation was using wrong format for
|
||||
`script` field in answer (#763)
|
||||
* `getnep5balances` RPC call implementation was using wrong name for
|
||||
`balance` field in answer (#763)
|
||||
* `getvalidators` RPC call was using non-compliant validators keys format in
|
||||
its answer (#763)
|
||||
* potential problems with GAS parameter handling in CLI (#790)
|
||||
* `contract init` CLI command was not working correctly (#792)
|
||||
* RPC calls now accept integer parameters in JSON strings which fix some
|
||||
incompatibilities (#794)
|
||||
* CALLI VM instruction was using wrong offset (#791, #798)
|
||||
* vm Map stack item for using different serialization format from C# node
|
||||
(#806, #808)
|
||||
* invoke* RPC calls were returning stack in `Stack` element rather than more
|
||||
compliant `stack` (#805)
|
||||
* slices of compound types were treated wrong by the compiler (#800)
|
||||
* wrong code was generated for struct variables declarations by the compiler
|
||||
(#800)
|
||||
* RPC client was incorrectly processing GetRawTransactionVerbose,
|
||||
GetBlockVerbose, GetBlockHeader, GetTxOut and ValidateAddress call results
|
||||
(#789)
|
||||
* type-specific transaction data was completely missing in getrawtransaction
|
||||
RPC call verbose output (#585)
|
||||
* documentation for wallet CLI commands was fixed to follow renames made by
|
||||
previous version (#814)
|
||||
* panic in map-containing notifications processing (#809)
|
||||
* VM Map implementation now has deterministic iteration order (#818)
|
||||
* MOD instruction behavior was fixed to follow neo-vm (#826)
|
||||
* negative arguments are now accepted for SHL/SHR VM instructions (#827)
|
||||
* improper in-block cache propagation leading to storage state differences
|
||||
with C# node (#821, #817)
|
||||
* CLI didn't check for mandatory `method` parameter presence for
|
||||
invokefunction command (#828)
|
||||
* wrong code generated by the compiler for return statements with no
|
||||
parameters (#929)
|
||||
* wrong code generated by the compiler for multiple function arguments with
|
||||
one type specification (#935)
|
||||
* NEP5 example contained wrong address check (#945)
|
||||
* improper code generated by the compiler for `op=` assignments to struct
|
||||
fields and slice elements (#954)
|
||||
* Storage.Find elements order was adjusted to match C# node implementation
|
||||
and eliminate state differences resulting from that (#822, #977, #988,
|
||||
#994)
|
||||
* step command wasn't really working in VM CLI (#967)
|
||||
* PICKITEM instruction implementation was fixed to reject improper input
|
||||
argument types (#965, #967)
|
||||
* Runtime.CheckWitness was fixed to only accept compressed keys (#968, #971)
|
||||
* input data length check was added to (*PublicKey).DecodeBytes (#971)
|
||||
* VM reference counting for SETITEM instruction was fixed (#973)
|
||||
* Map VM stack item can now be converted to Boolean (#974)
|
||||
* Structs were not cloned by SETITEM instruction implementation (#972, #975)
|
||||
* GetUnspentCoins syscall implementation was fixed to return array (#978,
|
||||
#979, #984)
|
||||
|
||||
## 0.74.0 "Comprehension" (17 Mar 2020)
|
||||
|
||||
Functionally complete NEO 2.0 node implementation, this release can be used as
|
||||
|
|
13
Dockerfile
13
Dockerfile
|
@ -2,7 +2,7 @@
|
|||
FROM golang:1-alpine as builder
|
||||
|
||||
RUN set -x \
|
||||
&& apk add --no-cache git \
|
||||
&& apk add --no-cache git make \
|
||||
&& mkdir -p /tmp
|
||||
|
||||
COPY . /neo-go
|
||||
|
@ -12,14 +12,7 @@ WORKDIR /neo-go
|
|||
ARG REPO=repository
|
||||
ARG VERSION=dev
|
||||
|
||||
# https://github.com/golang/go/wiki/Modules#how-do-i-use-vendoring-with-modules-is-vendoring-going-away
|
||||
# go build -mod=vendor
|
||||
RUN set -x \
|
||||
&& export GOGC=off \
|
||||
&& export GO111MODULE=on \
|
||||
&& export CGO_ENABLED=0 \
|
||||
&& export LDFLAGS="-X ${REPO}/config.Version=${VERSION}" \
|
||||
&& go build -v -mod=vendor -ldflags "${LDFLAGS}" -o /go/bin/neo-go ./cli
|
||||
RUN make
|
||||
|
||||
# Executable image
|
||||
FROM alpine
|
||||
|
@ -33,7 +26,7 @@ COPY --from=builder /neo-go/config /config
|
|||
COPY --from=builder /neo-go/.docker/6000-privnet-blocks.acc.gz /6000-privnet-blocks.acc.gz
|
||||
COPY --from=builder /neo-go/.docker/1600-privnet-blocks-single.acc.gz /1600-privnet-blocks-single.acc.gz
|
||||
COPY --from=builder /neo-go/.docker/privnet-entrypoint.sh /usr/bin/privnet-entrypoint.sh
|
||||
COPY --from=builder /go/bin/neo-go /usr/bin/neo-go
|
||||
COPY --from=builder /neo-go/bin/neo-go /usr/bin/neo-go
|
||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
|
||||
ENTRYPOINT ["/usr/bin/privnet-entrypoint.sh"]
|
||||
|
|
5
Makefile
5
Makefile
|
@ -26,7 +26,7 @@ build: deps
|
|||
@set -x \
|
||||
&& export GOGC=off \
|
||||
&& export CGO_ENABLED=0 \
|
||||
&& go build -v -mod=vendor -ldflags $(BUILD_FLAGS) -o ${BINARY} ./cli/main.go
|
||||
&& go build -trimpath -v -mod=vendor -ldflags $(BUILD_FLAGS) -o ${BINARY} ./cli/main.go
|
||||
|
||||
neo-go.service: neo-go.service.template
|
||||
@sed -r -e 's_BINDIR_$(BINDIR)_' -e 's_UNITWORKDIR_$(UNITWORKDIR)_' -e 's_SYSCONFIGDIR_$(SYSCONFIGDIR)_' $< >$@
|
||||
|
@ -61,6 +61,9 @@ image-push:
|
|||
check-version:
|
||||
git fetch && (! git rev-list ${VERSION})
|
||||
|
||||
version:
|
||||
@echo ${VERSION}
|
||||
|
||||
deps:
|
||||
@go mod tidy -v
|
||||
@go mod vendor
|
||||
|
|
58
README.md
58
README.md
|
@ -7,29 +7,35 @@
|
|||
|
||||
<hr />
|
||||
|
||||
[![codecov](https://codecov.io/gh/nspcc-dev/neo-go/branch/master/graph/badge.svg)](https://codecov.io/gh/nspcc-dev/neo-go)
|
||||
[![CircleCI](https://circleci.com/gh/nspcc-dev/neo-go/tree/master.svg?style=svg)](https://circleci.com/gh/nspcc-dev/neo-go/tree/master)
|
||||
[![Report](https://goreportcard.com/badge/github.com/nspcc-dev/neo-go)](https://goreportcard.com/report/github.com/nspcc-dev/neo-go)
|
||||
[![GoDoc](https://godoc.org/github.com/nspcc-dev/neo-go?status.svg)](https://godoc.org/github.com/nspcc-dev/neo-go)
|
||||
[![codecov](https://codecov.io/gh/nspcc-dev/neo-go/branch/master-2.x/graph/badge.svg)](https://codecov.io/gh/nspcc-dev/neo-go/branch/master-2.x)
|
||||
[![CircleCI](https://circleci.com/gh/nspcc-dev/neo-go/tree/master-2.x.svg?style=svg)](https://circleci.com/gh/nspcc-dev/neo-go/tree/master-2.x)
|
||||
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nspcc-dev/neo-go?sort=semver)
|
||||
![License](https://img.shields.io/github/license/nspcc-dev/neo-go.svg?style=popout)
|
||||
|
||||
# Overview
|
||||
|
||||
This project aims to be a full port of the original C# [NEO project](https://github.com/neo-project).
|
||||
This project aims to be a full port of the original C# [Neo project](https://github.com/neo-project).
|
||||
A complete toolkit for the NEO blockchain, including:
|
||||
|
||||
- [Consensus node](docs/consensus.md)
|
||||
- [RPC node & client](https://github.com/nspcc-dev/neo-go/tree/master/docs/rpc.md)
|
||||
- [CLI tool](https://github.com/nspcc-dev/neo-go/blob/master/docs/cli.md)
|
||||
- [Smart contract compiler](https://github.com/nspcc-dev/neo-go/blob/master/docs/compiler.md)
|
||||
- [NEO virtual machine](https://github.com/nspcc-dev/neo-go/blob/master/docs/vm.md)
|
||||
- [RPC node & client](docs/rpc.md)
|
||||
- [CLI tool](docs/cli.md)
|
||||
- [Smart contract compiler](docs/compiler.md)
|
||||
- [NEO virtual machine](docs/vm.md)
|
||||
|
||||
This branch (**master-2.x**) is a stable version of the project compatible
|
||||
with Neo 2 (including [cross-chain neox support](docs/neox.md)), it only
|
||||
receives bug fixes and minor updates. For Neo 3
|
||||
development version please refer to the [**master**
|
||||
branch](https://github.com/nspcc-dev/neo-go/tree/master) and releases
|
||||
after 0.90.0. Releases before 0.80.0 (**0.7X.Y** track) are made from this
|
||||
branch and only contain Neo 2 code.
|
||||
|
||||
# Getting started
|
||||
|
||||
## Installation
|
||||
|
||||
Go: 1.12+
|
||||
Go: 1.13+
|
||||
|
||||
Install dependencies.
|
||||
|
||||
|
@ -62,6 +68,8 @@ Build the **neo-go** CLI:
|
|||
make build
|
||||
```
|
||||
|
||||
### Running
|
||||
|
||||
Quick start a NEO node on the private network. This requires the [neo-privatenet](https://hub.docker.com/r/cityofzion/neo-privatenet/) Docker image running on your machine.
|
||||
|
||||
```
|
||||
|
@ -85,7 +93,29 @@ Available network flags:
|
|||
- `--privnet, -p`
|
||||
- `--testnet, -t`
|
||||
|
||||
#Developer notes
|
||||
#### Importing mainnet/testnet dump files
|
||||
|
||||
If you want to jump-start your mainnet or testnet node with [chain archives
|
||||
provided by NGD](https://sync.ngd.network/) follow these instructions:
|
||||
```
|
||||
$ wget .../chain.acc.zip # chain dump file
|
||||
$ unzip chain.acc.zip
|
||||
$ ./bin/neo-go db restore -m -i chain.acc # for testnet use '-t' flag instead of '-m'
|
||||
```
|
||||
|
||||
The process differs from the C# node in that block importing is a separate
|
||||
mode, after it ends the node can be started normally.
|
||||
|
||||
## Smart contract development
|
||||
|
||||
Please refer to [neo-go smart contract development
|
||||
workshop](https://github.com/nspcc-dev/neo-go-sc-wrkshp) that shows some
|
||||
simple contracts that can be compiled/deployed/run using neo-go compiler, SDK
|
||||
and private network. For details on how Go code is translated to Neo VM
|
||||
bytecode and what you can and can not do in smart contract please refer to the
|
||||
[compiler documentation](docs/compiler.md).
|
||||
|
||||
# Developer notes
|
||||
Nodes have such features as [Prometheus](https://prometheus.io/docs/guides/go-application) and
|
||||
[Pprof](https://golang.org/pkg/net/http/pprof/) in order to have additional information about them for debugging.
|
||||
|
||||
|
@ -98,10 +128,10 @@ In `config/protocol.*.yml` there is
|
|||
```
|
||||
where you can switch on/off and define port. Prometheus is enabled and Pprof is disabled by default.
|
||||
|
||||
# Contributing
|
||||
## Contributing
|
||||
|
||||
Feel free to contribute to this project after reading the
|
||||
[contributing guidelines](https://github.com/nspcc-dev/neo-go/blob/master/CONTRIBUTING.md).
|
||||
[contributing guidelines](CONTRIBUTING.md).
|
||||
|
||||
Before starting to work on a certain topic, create an new issue first,
|
||||
describing the feature/topic you are going to implement.
|
||||
|
@ -114,4 +144,4 @@ describing the feature/topic you are going to implement.
|
|||
|
||||
# License
|
||||
|
||||
- Open-source [MIT](https://github.com/nspcc-dev/neo-go/blob/master/LICENSE.md)
|
||||
- Open-source [MIT](LICENSE.md)
|
||||
|
|
14
ROADMAP.md
14
ROADMAP.md
|
@ -1,14 +0,0 @@
|
|||
# Roadmap for neo-go
|
||||
|
||||
This defines approximate plan of neo-go releases and key features planned for
|
||||
them. Things can change if there a need to push a bugfix or some critical
|
||||
functionality.
|
||||
|
||||
## Versions 0.7X.Y (as needed)
|
||||
* Neo 2.0 support (bug fixes, minor functionality additions)
|
||||
|
||||
## Version 0.90.0 (April 2020)
|
||||
* Neo 3.0 features preview
|
||||
|
||||
## Version 1.0 (2020, aligned with NEO 3.0 release)
|
||||
* full NEO 3.0 support
|
|
@ -8,6 +8,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
@ -33,35 +34,7 @@ func toNeoStorageKey(key []byte) []byte {
|
|||
if len(key) < util.Uint160Size {
|
||||
panic("invalid key in storage")
|
||||
}
|
||||
|
||||
var nkey []byte
|
||||
for i := util.Uint160Size - 1; i >= 0; i-- {
|
||||
nkey = append(nkey, key[i])
|
||||
}
|
||||
|
||||
key = key[util.Uint160Size:]
|
||||
|
||||
index := 0
|
||||
remain := len(key)
|
||||
for remain >= 16 {
|
||||
nkey = append(nkey, key[index:index+16]...)
|
||||
nkey = append(nkey, 0)
|
||||
index += 16
|
||||
remain -= 16
|
||||
}
|
||||
|
||||
if remain > 0 {
|
||||
nkey = append(nkey, key[index:]...)
|
||||
}
|
||||
|
||||
padding := 16 - remain
|
||||
for i := 0; i < padding; i++ {
|
||||
nkey = append(nkey, 0)
|
||||
}
|
||||
|
||||
nkey = append(nkey, byte(padding))
|
||||
|
||||
return nkey
|
||||
return mpt.ToNeoStorageKey(key)
|
||||
}
|
||||
|
||||
// batchToMap converts batch to a map so that JSON is compatible
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/config"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
|
@ -37,26 +38,26 @@ func NewCommands() []cli.Command {
|
|||
Name: "count, c",
|
||||
Usage: "number of blocks to be processed (default or 0: all chain)",
|
||||
},
|
||||
)
|
||||
var cfgCountOutFlags = make([]cli.Flag, len(cfgWithCountFlags))
|
||||
copy(cfgCountOutFlags, cfgWithCountFlags)
|
||||
cfgCountOutFlags = append(cfgCountOutFlags,
|
||||
cli.UintFlag{
|
||||
Name: "start, s",
|
||||
Usage: "block number to start from (default: 0)",
|
||||
},
|
||||
)
|
||||
var cfgCountOutFlags = make([]cli.Flag, len(cfgWithCountFlags))
|
||||
copy(cfgCountOutFlags, cfgWithCountFlags)
|
||||
cfgCountOutFlags = append(cfgCountOutFlags,
|
||||
cli.StringFlag{
|
||||
Name: "out, o",
|
||||
Usage: "Output file (stdout if not given)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "state, r",
|
||||
Usage: "File to export state roots to",
|
||||
},
|
||||
)
|
||||
var cfgCountInFlags = make([]cli.Flag, len(cfgWithCountFlags))
|
||||
copy(cfgCountInFlags, cfgWithCountFlags)
|
||||
cfgCountInFlags = append(cfgCountInFlags,
|
||||
cli.UintFlag{
|
||||
Name: "skip, s",
|
||||
Usage: "number of blocks to skip (default: 0)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "in, i",
|
||||
Usage: "Input file (stdin if not given)",
|
||||
|
@ -65,6 +66,14 @@ func NewCommands() []cli.Command {
|
|||
Name: "dump",
|
||||
Usage: "directory for storing JSON dumps",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "diff, k",
|
||||
Usage: "Use if DB is restore from diff and not full dump",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "state, r",
|
||||
Usage: "File to import state roots from",
|
||||
},
|
||||
)
|
||||
return []cli.Command{
|
||||
{
|
||||
|
@ -78,8 +87,10 @@ func NewCommands() []cli.Command {
|
|||
Usage: "database manipulations",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "dump blocks (starting with block #1) to the file",
|
||||
Name: "dump",
|
||||
Usage: "dump blocks (starting with block #1) to the file",
|
||||
UsageText: "When --start option is provided format is different because " +
|
||||
"index of the first block is written first.",
|
||||
Action: dumpDB,
|
||||
Flags: cfgCountOutFlags,
|
||||
},
|
||||
|
@ -202,19 +213,47 @@ func dumpDB(ctx *cli.Context) error {
|
|||
if count == 0 {
|
||||
count = chainCount - start
|
||||
}
|
||||
|
||||
var rootsWriter *io.BinWriter
|
||||
if out := ctx.String("state"); out != "" {
|
||||
rootsStream, err := os.Create(out)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("can't create file for state roots: %w", err), 1)
|
||||
}
|
||||
defer rootsStream.Close()
|
||||
rootsWriter = io.NewBinWriterFromIO(rootsStream)
|
||||
}
|
||||
if start != 0 {
|
||||
writer.WriteU32LE(start)
|
||||
}
|
||||
writer.WriteU32LE(count)
|
||||
|
||||
rootsCount := count
|
||||
if rootsWriter != nil {
|
||||
rootsWriter.WriteU32LE(start)
|
||||
if h := chain.StateHeight() + 1; start+rootsCount > h {
|
||||
log.Info("state height is low, state root dump will be cut", zap.Uint32("height", h))
|
||||
rootsCount = h - start
|
||||
}
|
||||
rootsWriter.WriteU32LE(rootsCount)
|
||||
}
|
||||
|
||||
for i := start; i < start+count; i++ {
|
||||
if rootsWriter != nil && i < start+rootsCount {
|
||||
r, err := chain.GetStateRoot(i)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to get stateroot %d: %w", i, err), 1)
|
||||
}
|
||||
if err := writeSizedItem(&r.MPTRoot, rootsWriter); err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
}
|
||||
bh := chain.GetHeaderHash(int(i))
|
||||
b, err := chain.GetBlock(bh)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to get block %d: %s", i, err), 1)
|
||||
}
|
||||
buf := io.NewBufBinWriter()
|
||||
b.EncodeBinary(buf.BinWriter)
|
||||
bytes := buf.Bytes()
|
||||
writer.WriteU32LE(uint32(len(bytes)))
|
||||
writer.WriteBytes(bytes)
|
||||
if writer.Err != nil {
|
||||
if err := writeSizedItem(b, writer); err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
}
|
||||
|
@ -224,6 +263,15 @@ func dumpDB(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func writeSizedItem(item io.Serializable, w *io.BinWriter) error {
|
||||
buf := io.NewBufBinWriter()
|
||||
item.EncodeBinary(buf.BinWriter)
|
||||
bytes := buf.Bytes()
|
||||
w.WriteU32LE(uint32(len(bytes)))
|
||||
w.WriteBytes(bytes)
|
||||
return w.Err
|
||||
}
|
||||
|
||||
func restoreDB(ctx *cli.Context) error {
|
||||
cfg, err := getConfigFromContext(ctx)
|
||||
if err != nil {
|
||||
|
@ -234,7 +282,7 @@ func restoreDB(ctx *cli.Context) error {
|
|||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
count := uint32(ctx.Uint("count"))
|
||||
skip := uint32(ctx.Uint("skip"))
|
||||
start := uint32(ctx.Uint("start"))
|
||||
|
||||
var inStream = os.Stdin
|
||||
if in := ctx.String("in"); in != "" {
|
||||
|
@ -246,6 +294,16 @@ func restoreDB(ctx *cli.Context) error {
|
|||
defer inStream.Close()
|
||||
reader := io.NewBinReaderFromIO(inStream)
|
||||
|
||||
var rootsIn *io.BinReader
|
||||
if in := ctx.String("state"); in != "" {
|
||||
inStream, err := os.Open(in)
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
defer inStream.Close()
|
||||
rootsIn = io.NewBinReaderFromIO(inStream)
|
||||
}
|
||||
|
||||
dumpDir := ctx.String("dump")
|
||||
if dumpDir != "" {
|
||||
cfg.ProtocolConfiguration.SaveStorageBatch = true
|
||||
|
@ -259,23 +317,61 @@ func restoreDB(ctx *cli.Context) error {
|
|||
defer prometheus.ShutDown()
|
||||
defer pprof.ShutDown()
|
||||
|
||||
var allBlocks = reader.ReadU32LE()
|
||||
if reader.Err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
dumpStart := uint32(0)
|
||||
dumpSize := reader.ReadU32LE()
|
||||
if ctx.Bool("diff") {
|
||||
// in diff first uint32 is the index of the first block
|
||||
dumpStart = dumpSize
|
||||
dumpSize = reader.ReadU32LE()
|
||||
}
|
||||
if skip+count > allBlocks {
|
||||
return cli.NewExitError(fmt.Errorf("input file has only %d blocks, can't read %d starting from %d", allBlocks, count, skip), 1)
|
||||
if reader.Err != nil {
|
||||
return cli.NewExitError(reader.Err, 1)
|
||||
}
|
||||
if start < dumpStart {
|
||||
return cli.NewExitError(fmt.Errorf("input file start from %d block, can't import %d", dumpStart, start), 1)
|
||||
}
|
||||
|
||||
lastBlock := dumpStart + dumpSize
|
||||
if start+count > lastBlock {
|
||||
return cli.NewExitError(fmt.Errorf("input file has blocks up until %d, can't read %d starting from %d", lastBlock, count, start), 1)
|
||||
}
|
||||
if count == 0 {
|
||||
count = allBlocks - skip
|
||||
count = lastBlock - start
|
||||
}
|
||||
i := uint32(0)
|
||||
for ; i < skip; i++ {
|
||||
_, err := readBlock(reader)
|
||||
|
||||
var rootStart, rootSize uint32
|
||||
rootCount := count
|
||||
if rootsIn != nil {
|
||||
rootStart = rootsIn.ReadU32LE()
|
||||
rootSize = rootsIn.ReadU32LE()
|
||||
if rootsIn.Err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("error while reading roots file: %w", rootsIn.Err), 1)
|
||||
}
|
||||
if start < rootStart {
|
||||
return cli.NewExitError(fmt.Errorf("roots file start from %d root, can't import %d", rootStart, start), 1)
|
||||
}
|
||||
lastRoot := rootStart + rootSize
|
||||
if rootStart+rootCount > lastRoot {
|
||||
log.Info("state root height is low", zap.Uint32("height", lastRoot))
|
||||
rootCount = lastRoot - rootStart
|
||||
}
|
||||
}
|
||||
|
||||
i := dumpStart
|
||||
for ; i < start; i++ {
|
||||
_, err := readBytes(reader)
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
}
|
||||
if rootsIn != nil {
|
||||
for j := rootStart; j < start; j++ {
|
||||
_, err := readBytes(rootsIn)
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gctx := newGraceContext()
|
||||
var lastIndex uint32
|
||||
|
@ -284,47 +380,66 @@ func restoreDB(ctx *cli.Context) error {
|
|||
_ = dump.tryPersist(dumpDir, lastIndex)
|
||||
}()
|
||||
|
||||
for ; i < skip+count; i++ {
|
||||
for ; i < start+count; i++ {
|
||||
select {
|
||||
case <-gctx.Done():
|
||||
return cli.NewExitError("cancelled", 1)
|
||||
default:
|
||||
}
|
||||
bytes, err := readBlock(reader)
|
||||
block := &block.Block{}
|
||||
newReader := io.NewBinReaderFromBuf(bytes)
|
||||
block.DecodeBinary(newReader)
|
||||
if err != nil {
|
||||
block := new(block.Block)
|
||||
if err := readSizedItem(block, reader); err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
if block.Index == 0 && i == 0 && skip == 0 {
|
||||
var skipBlock bool
|
||||
if block.Index == 0 && i == 0 && start == 0 {
|
||||
genesis, err := chain.GetBlock(block.Hash())
|
||||
if err == nil && genesis.Index == 0 {
|
||||
log.Info("skipped genesis block", zap.String("hash", block.Hash().StringLE()))
|
||||
continue
|
||||
skipBlock = true
|
||||
}
|
||||
}
|
||||
err = chain.AddBlock(block)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
|
||||
}
|
||||
|
||||
if dumpDir != "" {
|
||||
batch := chain.LastBatch()
|
||||
dump.add(block.Index, batch)
|
||||
lastIndex = block.Index
|
||||
if block.Index%1000 == 0 {
|
||||
if err := dump.tryPersist(dumpDir, block.Index); err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("can't dump storage to file: %v", err), 1)
|
||||
if !skipBlock {
|
||||
err = chain.AddBlock(block)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to add block %d: %s", i, err), 1)
|
||||
}
|
||||
if dumpDir != "" {
|
||||
batch := chain.LastBatch()
|
||||
dump.add(block.Index, batch)
|
||||
lastIndex = block.Index
|
||||
if block.Index%1000 == 0 {
|
||||
if err := dump.tryPersist(dumpDir, block.Index); err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("can't dump storage to file: %v", err), 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if rootsIn != nil && i < rootStart+rootCount {
|
||||
sr := new(state.MPTRoot)
|
||||
if err := readSizedItem(sr, rootsIn); err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
err = chain.AddStateRoot(sr)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("can't add state root: %w", err), 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readBlock performs reading of block size and then bytes with the length equal to that size.
|
||||
func readBlock(reader *io.BinReader) ([]byte, error) {
|
||||
func readSizedItem(item io.Serializable, r *io.BinReader) error {
|
||||
bytes, err := readBytes(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newReader := io.NewBinReaderFromBuf(bytes)
|
||||
item.DecodeBinary(newReader)
|
||||
return newReader.Err
|
||||
}
|
||||
|
||||
// readBytes performs reading of block size and then bytes with the length equal to that size.
|
||||
func readBytes(reader *io.BinReader) ([]byte, error) {
|
||||
var size = reader.ReadU32LE()
|
||||
bytes := make([]byte, size)
|
||||
reader.ReadBytes(bytes)
|
||||
|
|
|
@ -13,7 +13,6 @@ import (
|
|||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/go-yaml/yaml"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/pkg/compiler"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
|
@ -28,12 +27,14 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoEndpoint = errors.New("no RPC endpoint specified, use option '--endpoint' or '-e'")
|
||||
errNoInput = errors.New("no input file was found, specify an input file with the '--in or -i' flag")
|
||||
errNoConfFile = errors.New("no config file was found, specify a config file with the '--config' or '-c' flag")
|
||||
errNoMethod = errors.New("no method specified for function invocation command")
|
||||
errNoWallet = errors.New("no wallet parameter found, specify it with the '--wallet or -w' flag")
|
||||
errNoScriptHash = errors.New("no smart contract hash was provided, specify one as the first argument")
|
||||
errNoSmartContractName = errors.New("no name was provided, specify the '--name or -n' flag")
|
||||
|
@ -53,7 +54,11 @@ var (
|
|||
}
|
||||
gasFlag = flags.Fixed8Flag{
|
||||
Name: "gas, g",
|
||||
Usage: "gas to add to the transaction",
|
||||
Usage: "gas to add to the transaction (network fee)",
|
||||
}
|
||||
sysGasFlag = flags.Fixed8Flag{
|
||||
Name: "sysgas, s",
|
||||
Usage: "system fee to add to invocation transaction",
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -67,6 +72,9 @@ import "github.com/nspcc-dev/neo-go/pkg/interop/runtime"
|
|||
func Main(op string, args []interface{}) {
|
||||
runtime.Notify("Hello world!")
|
||||
}`
|
||||
|
||||
// hashesForVerifyingSeparator delimits `invoke*` parameters from hashes for verifying
|
||||
hashesForVerifyingSeparator = "--"
|
||||
)
|
||||
|
||||
// NewCommands returns 'contract' command.
|
||||
|
@ -96,6 +104,14 @@ func NewCommands() []cli.Command {
|
|||
Name: "debug, d",
|
||||
Usage: "Emit debug info in a separate file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "abi, a",
|
||||
Usage: "Emit application binary interface (.abi.json) file into separate file using configuration input file (*.yml)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
Usage: "Configuration input file (*.yml)",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -126,7 +142,7 @@ func NewCommands() []cli.Command {
|
|||
{
|
||||
Name: "invoke",
|
||||
Usage: "invoke deployed contract on the blockchain",
|
||||
UsageText: "neo-go contract invoke -e endpoint -w wallet [-a address] [-g gas] scripthash [arguments...]",
|
||||
UsageText: "neo-go contract invoke -e endpoint -w wallet [-a address] [-g gas] [-s sysgas] scripthash [arguments...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given arguments.
|
||||
See testinvoke documentation for the details about parameters. It differs
|
||||
from testinvoke in that this command sends an invocation transaction to
|
||||
|
@ -138,12 +154,13 @@ func NewCommands() []cli.Command {
|
|||
walletFlag,
|
||||
addressFlag,
|
||||
gasFlag,
|
||||
sysGasFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "invokefunction",
|
||||
Usage: "invoke deployed contract on the blockchain",
|
||||
UsageText: "neo-go contract invokefunction -e endpoint -w wallet [-a address] [-g gas] scripthash [method] [arguments...]",
|
||||
UsageText: "neo-go contract invokefunction -e endpoint -w wallet [-a address] [-g gas] [-s sysgas] scripthash [method] [arguments...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given method and
|
||||
and arguments. See testinvokefunction documentation for the details about
|
||||
parameters. It differs from testinvokefunction in that this command sends an
|
||||
|
@ -155,13 +172,15 @@ func NewCommands() []cli.Command {
|
|||
walletFlag,
|
||||
addressFlag,
|
||||
gasFlag,
|
||||
sysGasFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "testinvoke",
|
||||
Usage: "invoke deployed contract on the blockchain (test mode)",
|
||||
UsageText: "neo-go contract testinvoke -e endpoint scripthash [arguments...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given arguments.
|
||||
UsageText: "neo-go contract testinvoke -e endpoint scripthash [arguments...] [-- hashesForVerifying...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given arguments
|
||||
and hashes to verify using runtime.CheckWitness syscall.
|
||||
It's very similar to the tesinvokefunction command, but differs in the way
|
||||
arguments are being passed. This invoker does not accept method parameter
|
||||
and it passes all given parameters as plain values to the contract, not
|
||||
|
@ -180,13 +199,14 @@ func NewCommands() []cli.Command {
|
|||
{
|
||||
Name: "testinvokefunction",
|
||||
Usage: "invoke deployed contract on the blockchain (test mode)",
|
||||
UsageText: "neo-go contract testinvokefunction -e endpoint scripthash [method] [arguments...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given method and
|
||||
arguments. If no method is given "" is passed to the script, if no arguments
|
||||
are given, an empty array is passed. All of the given arguments are
|
||||
encapsulated into array before invoking the script. The script thus should
|
||||
follow the regular convention of smart contract arguments (method string and
|
||||
an array of other arguments).
|
||||
UsageText: "neo-go contract testinvokefunction -e endpoint scripthash [method] [arguments...] [-- hashesForVerifying...]",
|
||||
Description: `Executes given (as a script hash) deployed script with the given method,
|
||||
arguments and hashes to verify using System.Runtime.CheckWitness syscall.
|
||||
If no method is given "" is passed to the script, if no arguments
|
||||
are given, an empty array is passed, if no hashes are given, no array is
|
||||
passed. All of the given arguments are encapsulated into array before
|
||||
invoking the script. The script thus should follow the regular convention
|
||||
of smart contract arguments (method string and an array of other arguments).
|
||||
|
||||
Arguments always do have regular Neo smart contract parameter types, either
|
||||
specified explicitly or being inferred from the value. To specify the type
|
||||
|
@ -242,6 +262,15 @@ func NewCommands() []cli.Command {
|
|||
* 'string\:string' is a string with a value of 'string:string'
|
||||
* '03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c' is a
|
||||
key with a value of '03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c'
|
||||
|
||||
HashesForVerifying represent a set of Uint160 hashes which are used to verify
|
||||
hashes in System.Runtime.CheckWitness syscall. To specify hash use its
|
||||
hex-encoded 160 bit (20 byte) LE representation with optional '0x' prefix.
|
||||
If no hashes were specified, no array is passed.
|
||||
|
||||
Examples:
|
||||
* '0000000009070e030d0f0e020d0c06050e030c02'
|
||||
* '0x0000000009070e030d0f0e020d0c06050e030c02'
|
||||
`,
|
||||
Action: testInvokeFunction,
|
||||
Flags: []cli.Flag{
|
||||
|
@ -249,9 +278,10 @@ func NewCommands() []cli.Command {
|
|||
},
|
||||
},
|
||||
{
|
||||
Name: "testinvokescript",
|
||||
Usage: "Invoke compiled AVM code on the blockchain (test mode, not creating a transaction for it)",
|
||||
Action: testInvokeScript,
|
||||
Name: "testinvokescript",
|
||||
Usage: "Invoke compiled AVM code on the blockchain (test mode, not creating a transaction for it)",
|
||||
UsageText: "neo-go contract testinvokescript -e endpoint -i testcontract.avm [-- hashesForVerifying...]",
|
||||
Action: testInvokeScript,
|
||||
Flags: []cli.Flag{
|
||||
endpointFlag,
|
||||
cli.StringFlag{
|
||||
|
@ -348,11 +378,25 @@ func contractCompile(ctx *cli.Context) error {
|
|||
if len(src) == 0 {
|
||||
return cli.NewExitError(errNoInput, 1)
|
||||
}
|
||||
abi := ctx.String("abi")
|
||||
confFile := ctx.String("config")
|
||||
if len(abi) != 0 && len(confFile) == 0 {
|
||||
return cli.NewExitError(errNoConfFile, 1)
|
||||
}
|
||||
|
||||
o := &compiler.Options{
|
||||
Outfile: ctx.String("out"),
|
||||
|
||||
DebugInfo: ctx.String("debug"),
|
||||
ABIInfo: abi,
|
||||
}
|
||||
|
||||
if len(confFile) != 0 {
|
||||
conf, err := parseContractConfig(confFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.ContractDetails = &conf.Contract
|
||||
}
|
||||
|
||||
result, err := compiler.CompileAndSave(src, o)
|
||||
|
@ -384,13 +428,16 @@ func invokeFunction(ctx *cli.Context) error {
|
|||
|
||||
func invokeInternal(ctx *cli.Context, withMethod bool, signAndPush bool) error {
|
||||
var (
|
||||
err error
|
||||
gas util.Fixed8
|
||||
operation string
|
||||
params = make([]smartcontract.Parameter, 0)
|
||||
paramsStart = 1
|
||||
resp *result.Invoke
|
||||
acc *wallet.Account
|
||||
err error
|
||||
gas util.Fixed8
|
||||
sysGas util.Fixed8
|
||||
operation string
|
||||
params = make([]smartcontract.Parameter, 0)
|
||||
paramsStart = 1
|
||||
hashesForVerifying []util.Uint160
|
||||
hashesForVerifyingStart = 0
|
||||
resp *result.Invoke
|
||||
acc *wallet.Account
|
||||
)
|
||||
|
||||
endpoint := ctx.String("endpoint")
|
||||
|
@ -403,12 +450,22 @@ func invokeInternal(ctx *cli.Context, withMethod bool, signAndPush bool) error {
|
|||
return cli.NewExitError(errNoScriptHash, 1)
|
||||
}
|
||||
script := args[0]
|
||||
if withMethod && len(args) > 1 {
|
||||
if withMethod {
|
||||
if len(args) <= 1 {
|
||||
return cli.NewExitError(errNoMethod, 1)
|
||||
}
|
||||
operation = args[1]
|
||||
paramsStart++
|
||||
}
|
||||
if len(args) > paramsStart {
|
||||
for k, s := range args[paramsStart:] {
|
||||
if s == hashesForVerifyingSeparator {
|
||||
if signAndPush {
|
||||
return cli.NewExitError("adding hashes for verifying available for test invokes only", 1)
|
||||
}
|
||||
hashesForVerifyingStart = paramsStart + k + 1
|
||||
break
|
||||
}
|
||||
param, err := smartcontract.NewParameterFromString(s)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to parse argument #%d: %v", k+paramsStart+1, err), 1)
|
||||
|
@ -417,8 +474,19 @@ func invokeInternal(ctx *cli.Context, withMethod bool, signAndPush bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
if len(args) >= hashesForVerifyingStart && hashesForVerifyingStart > 0 {
|
||||
for i, c := range args[hashesForVerifyingStart:] {
|
||||
h, err := parseUint160(c)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to parse hash for verifying #%d: %v", i+1, err), 1)
|
||||
}
|
||||
hashesForVerifying = append(hashesForVerifying, h)
|
||||
}
|
||||
}
|
||||
|
||||
if signAndPush {
|
||||
gas = flags.Fixed8FromContext(ctx, "gas")
|
||||
sysGas = flags.Fixed8FromContext(ctx, "sysgas")
|
||||
acc, err = getAccFromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -430,9 +498,9 @@ func invokeInternal(ctx *cli.Context, withMethod bool, signAndPush bool) error {
|
|||
}
|
||||
|
||||
if withMethod {
|
||||
resp, err = c.InvokeFunction(script, operation, params)
|
||||
resp, err = c.InvokeFunction(script, operation, params, hashesForVerifying)
|
||||
} else {
|
||||
resp, err = c.Invoke(script, params)
|
||||
resp, err = c.Invoke(script, params, hashesForVerifying)
|
||||
}
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
|
@ -445,7 +513,7 @@ func invokeInternal(ctx *cli.Context, withMethod bool, signAndPush bool) error {
|
|||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("bad script returned from the RPC node: %v", err), 1)
|
||||
}
|
||||
txHash, err := c.SignAndPushInvocationTx(script, acc, 0, gas)
|
||||
txHash, err := c.SignAndPushInvocationTx(script, acc, sysGas, gas)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to push invocation tx: %v", err), 1)
|
||||
}
|
||||
|
@ -477,13 +545,25 @@ func testInvokeScript(ctx *cli.Context) error {
|
|||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
|
||||
args := ctx.Args()
|
||||
var hashesForVerifying []util.Uint160
|
||||
if args.Present() {
|
||||
for i, c := range args[:] {
|
||||
h, err := parseUint160(c)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("failed to parse hash for verifying #%d: %v", i+1, err), 1)
|
||||
}
|
||||
hashesForVerifying = append(hashesForVerifying, h)
|
||||
}
|
||||
}
|
||||
|
||||
c, err := client.New(context.TODO(), endpoint, client.Options{})
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
|
||||
scriptHex := hex.EncodeToString(b)
|
||||
resp, err := c.InvokeScript(scriptHex)
|
||||
resp, err := c.InvokeScript(scriptHex, hashesForVerifying)
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
|
@ -498,14 +578,21 @@ func testInvokeScript(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parseUint160(s string) (util.Uint160, error) {
|
||||
if len(s) == 2*util.Uint160Size+2 && s[0] == '0' && s[1] == 'x' {
|
||||
s = s[2:]
|
||||
}
|
||||
return util.Uint160DecodeStringLE(s)
|
||||
}
|
||||
|
||||
// ProjectConfig contains project metadata.
|
||||
type ProjectConfig struct {
|
||||
Version uint
|
||||
Contract request.ContractDetails `yaml:"project"`
|
||||
Contract smartcontract.ContractDetails `yaml:"project"`
|
||||
}
|
||||
|
||||
func parseContractDetails() request.ContractDetails {
|
||||
details := request.ContractDetails{}
|
||||
func parseContractDetails() smartcontract.ContractDetails {
|
||||
details := smartcontract.ContractDetails{}
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Print("Author: ")
|
||||
|
@ -610,15 +697,9 @@ func contractDeploy(ctx *cli.Context) error {
|
|||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
confBytes, err := ioutil.ReadFile(confFile)
|
||||
conf, err := parseContractConfig(confFile)
|
||||
if err != nil {
|
||||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
|
||||
conf := ProjectConfig{}
|
||||
err = yaml.Unmarshal(confBytes, &conf)
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("bad config: %v", err), 1)
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := client.New(context.TODO(), endpoint, client.Options{})
|
||||
|
@ -640,3 +721,17 @@ func contractDeploy(ctx *cli.Context) error {
|
|||
fmt.Printf("Sent deployment transaction %s for contract %s\n", txHash.StringLE(), hash.Hash160(avm).StringLE())
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseContractConfig(confFile string) (ProjectConfig, error) {
|
||||
conf := ProjectConfig{}
|
||||
confBytes, err := ioutil.ReadFile(confFile)
|
||||
if err != nil {
|
||||
return conf, cli.NewExitError(err, 1)
|
||||
}
|
||||
|
||||
err = yaml.Unmarshal(confBytes, &conf)
|
||||
if err != nil {
|
||||
return conf, cli.NewExitError(fmt.Errorf("bad config: %v", err), 1)
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
|
|
@ -48,6 +48,10 @@ var (
|
|||
Name: "rpc, r",
|
||||
Usage: "RPC node address",
|
||||
}
|
||||
r14Flag = cli.StringFlag{
|
||||
Name: "remark14",
|
||||
Usage: "Remark14 field",
|
||||
}
|
||||
timeoutFlag = cli.DurationFlag{
|
||||
Name: "timeout, t",
|
||||
Usage: "Timeout for the operation",
|
||||
|
@ -188,6 +192,7 @@ func NewCommands() []cli.Command {
|
|||
Flags: []cli.Flag{
|
||||
walletPathFlag,
|
||||
rpcFlag,
|
||||
r14Flag,
|
||||
timeoutFlag,
|
||||
outFlag,
|
||||
fromAddrFlag,
|
||||
|
@ -487,6 +492,8 @@ func transferAsset(ctx *cli.Context) error {
|
|||
return cli.NewExitError(fmt.Errorf("wallet contains no account for '%s'", from), 1)
|
||||
}
|
||||
|
||||
remark14 := ctx.String("remark14")
|
||||
|
||||
asset, err := getAssetID(ctx.String("asset"))
|
||||
if err != nil {
|
||||
return cli.NewExitError(fmt.Errorf("invalid asset id: %v", err), 1)
|
||||
|
@ -517,6 +524,13 @@ func transferAsset(ctx *cli.Context) error {
|
|||
return cli.NewExitError(err, 1)
|
||||
}
|
||||
|
||||
if remark14 != "" {
|
||||
tx.Attributes = append(tx.Attributes, transaction.Attribute{
|
||||
Usage: transaction.Remark14,
|
||||
Data: []byte(remark14),
|
||||
})
|
||||
}
|
||||
|
||||
toFlag := ctx.Generic("to").(*flags.Address)
|
||||
if !toFlag.IsSet {
|
||||
return cli.NewExitError("'to' address was not provided", 1)
|
||||
|
|
|
@ -2,8 +2,12 @@ ProtocolConfiguration:
|
|||
Magic: 7630401
|
||||
AddressVersion: 23
|
||||
SecondsPerBlock: 15
|
||||
EnableStateRoot: true
|
||||
StateRootEnableIndex: 6016000
|
||||
KeepOnlyLatestState: false
|
||||
LowPriorityThreshold: 0.001
|
||||
MemPoolSize: 50000
|
||||
NoBonusHeight: 8000000
|
||||
StandbyValidators:
|
||||
- 03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c
|
||||
- 02df48f60e8f3e01c48ff40b9b7f1310d7a8b2a193188befe1c2e3df740e895093
|
||||
|
@ -30,10 +34,11 @@ ProtocolConfiguration:
|
|||
RegisterTransaction: 10000
|
||||
VerifyBlocks: true
|
||||
VerifyTransactions: false
|
||||
FreeGasLimit: 10.0
|
||||
MaxTransactionsPerBlock: 500
|
||||
MaxFreeTransactionsPerBlock: 20
|
||||
FreeGasLimit: {0: 10.0, 6195000: 50.0}
|
||||
MaxTransactionsPerBlock: {0: 500, 6195000: 200}
|
||||
MaxFreeTransactionsPerBlock: {0: 20, 6195000: 199}
|
||||
MaxFreeTransactionSize: 1024
|
||||
MinimumNetworkFee: 0
|
||||
FeePerExtraByte: 0.00001
|
||||
|
||||
ApplicationConfiguration:
|
||||
|
|
|
@ -2,8 +2,12 @@ ProtocolConfiguration:
|
|||
Magic: 1953787457
|
||||
AddressVersion: 23
|
||||
SecondsPerBlock: 15
|
||||
EnableStateRoot: true
|
||||
StateRootEnableIndex: 4380100
|
||||
KeepOnlyLatestState: false
|
||||
LowPriorityThreshold: 0.000
|
||||
MemPoolSize: 50000
|
||||
NoBonusHeight: 8000000
|
||||
StandbyValidators:
|
||||
- 0327da12b5c40200e9f65569476bbff2218da4f32548ff43b6387ec1416a231ee8
|
||||
- 026ce35b29147ad09e4afe4ec4a7319095f08198fa8babbe3c56e970b143528d22
|
||||
|
@ -30,10 +34,11 @@ ProtocolConfiguration:
|
|||
RegisterTransaction: 100
|
||||
VerifyBlocks: true
|
||||
VerifyTransactions: false
|
||||
FreeGasLimit: 10.0
|
||||
MaxTransactionsPerBlock: 500
|
||||
MaxFreeTransactionsPerBlock: 20
|
||||
FreeGasLimit: {0: 10.0, 4840000: 50.0}
|
||||
MaxTransactionsPerBlock: {0: 500, 4840000: 200}
|
||||
MaxFreeTransactionsPerBlock: {0: 20, 4840000: 199}
|
||||
MaxFreeTransactionSize: 1024
|
||||
MinimumNetworkFee: 0
|
||||
FeePerExtraByte: 0.00001
|
||||
|
||||
ApplicationConfiguration:
|
||||
|
|
|
@ -2,6 +2,8 @@ ProtocolConfiguration:
|
|||
Magic: 56753
|
||||
AddressVersion: 23
|
||||
SecondsPerBlock: 15
|
||||
EnableStateRoot: true
|
||||
KeepOnlyLatestState: false
|
||||
LowPriorityThreshold: 0.000
|
||||
MemPoolSize: 50000
|
||||
StandbyValidators:
|
||||
|
@ -49,9 +51,10 @@ ApplicationConfiguration:
|
|||
AttemptConnPeers: 5
|
||||
MinPeers: 1
|
||||
RPC:
|
||||
Address: 127.0.0.1
|
||||
Enabled: true
|
||||
EnableCORSWorkaround: false
|
||||
Port: 20332
|
||||
Port: 0 # let the system choose port dynamically
|
||||
Prometheus:
|
||||
Enabled: false #since it's not useful for unit tests.
|
||||
Port: 2112
|
||||
|
|
|
@ -166,7 +166,7 @@ INDEX OPCODE DESC
|
|||
```
|
||||
|
||||
In depth documentation about the **neo-go** compiler and smart contract examples can be found inside
|
||||
the [compiler package](https://github.com/nspcc-dev/neo-go/tree/master/pkg/compiler).
|
||||
the [compiler package](pkg/compiler).
|
||||
|
||||
## VM run
|
||||
To run VM use
|
||||
|
|
173
docs/compiler.md
173
docs/compiler.md
|
@ -2,66 +2,39 @@
|
|||
|
||||
The neo-go compiler compiles Go programs to bytecode that the NEO virtual machine can understand.
|
||||
|
||||
## Currently supported
|
||||
## Language compatibility
|
||||
|
||||
### Go internals
|
||||
- type checking
|
||||
- multiple assignments
|
||||
- global variables
|
||||
- types int, string, byte and booleans
|
||||
- struct types + method receives
|
||||
- functions
|
||||
- composite literals `[]int, []string, []byte`
|
||||
- basic if statements
|
||||
- binary expressions
|
||||
- return statements
|
||||
- for loops
|
||||
- imports
|
||||
The compiler is mostly compatible with regular Go language specification, but
|
||||
there are some important deviations that you need to be aware of that make it
|
||||
a dialect of Go rather than a complete port of the language:
|
||||
* `make()` ane `new()` are not supported, most of the time you can substitute
|
||||
them with composite literals
|
||||
* there is no real distinction between different integer types, all of them
|
||||
work as big.Int in Go with a limit of 256 bit in width, so you can use
|
||||
`int` for just about anything. This is the way integers work in Neo VM and
|
||||
adding proper Go types emulation is considered to be too costly.
|
||||
* goroutines, channels and garbage collection are not supported and will
|
||||
never be because emulating that aspects of Go runtime on top of Neo VM is
|
||||
close to impossible
|
||||
* even though `panic()` is supported, `recover()` is not, `panic` shuts the
|
||||
VM down
|
||||
* lambdas are not supported
|
||||
* global variables can't be changed in functions (#638)
|
||||
* it's not possible to rename imported interop packages, they won't work this
|
||||
way (#397, #913)
|
||||
* nested selectors are not yet supported (#957)
|
||||
* using value variable in range-based loops is not yet supported (#958)
|
||||
|
||||
### Go builtins
|
||||
- len
|
||||
- append
|
||||
|
||||
### VM API (interop layer)
|
||||
## VM API (interop layer)
|
||||
Compiler translates interop function calls into NEO VM syscalls or (for custom
|
||||
functions) into NEO VM instructions. [Refer to GoDoc](https://godoc.org/github.com/nspcc-dev/neo-go/pkg/interop) for full API documentation.
|
||||
|
||||
#### Standard NEO Smart Contract API
|
||||
- account
|
||||
- asset
|
||||
- attribute
|
||||
- block
|
||||
- blockchain
|
||||
- contract
|
||||
- engine
|
||||
- header
|
||||
- input
|
||||
- iterator
|
||||
- output
|
||||
- runtime
|
||||
- storage
|
||||
- transaction
|
||||
|
||||
#### Custom VM utility helper functions
|
||||
- crypto:
|
||||
- `SHA1`
|
||||
- `SHA256`
|
||||
- `Hash256`
|
||||
- `Hash160`
|
||||
- enumerator
|
||||
- util:
|
||||
- `Equals` (to emit `EQUALS` opcode, not needed usually)
|
||||
- `FromAddress(address string) []byte`
|
||||
|
||||
## Not supported
|
||||
Due to the limitations of the NEO virtual machine, features listed below will not be supported.
|
||||
- channels
|
||||
- goroutines
|
||||
- returning multiple values from functions
|
||||
functions) into NEO VM instructions. [Refer to
|
||||
pkg.go.dev](https://pkg.go.dev/github.com/nspcc-dev/neo-go/pkg/interop)
|
||||
for full API documentation. In general it provides the same level of
|
||||
functionality as Neo .net Framework library.
|
||||
|
||||
## Quick start
|
||||
|
||||
### Compile a smart contract
|
||||
### Compiling
|
||||
|
||||
```
|
||||
./bin/neo-go contract compile -i mycontract.go
|
||||
|
@ -73,7 +46,7 @@ By default the filename will be the name of your .go file with the .avm extensio
|
|||
./bin/neo-go contract compile -i mycontract.go --out /Users/foo/bar/contract.avm
|
||||
```
|
||||
|
||||
### Debugging your smart contract
|
||||
### Debugging
|
||||
You can dump the opcodes generated by the compiler with the following command:
|
||||
|
||||
```
|
||||
|
@ -112,33 +85,89 @@ INDEX OPCODE DESC
|
|||
25 0x66 RET
|
||||
```
|
||||
|
||||
### Test invoke a compiled contract
|
||||
You can simulate a test invocation of your compiled contract by the VM, to know the total gas cost for example, with the following command:
|
||||
#### Neo Smart Contract Debugger support
|
||||
|
||||
It's possible to debug contracts written in Go using standard [Neo Smart
|
||||
Contract Debugger](https://github.com/neo-project/neo-debugger/) which is a
|
||||
part of [Neo Blockchain
|
||||
Toolkit](https://github.com/neo-project/neo-blockchain-toolkit/). To do that
|
||||
you need to generate debug information using `--debug` option, like this:
|
||||
|
||||
```
|
||||
./bin/neo-go contract testinvoke -i mycompiledcontract.avm
|
||||
$ ./bin/neo-go contract compile -i contract.go -o contract.avm --debug contract.debug.json
|
||||
```
|
||||
|
||||
Will output something like:
|
||||
```
|
||||
{
|
||||
"state": "HALT, BREAK",
|
||||
"gas_consumed": "0.006",
|
||||
"Stack": [
|
||||
{
|
||||
"type": "Integer",
|
||||
"value": "9"
|
||||
}
|
||||
]
|
||||
}
|
||||
This file can then be used by debugger and set up to work just like for any
|
||||
other supported language.
|
||||
|
||||
### Deploying
|
||||
|
||||
Deploying a contract to blockchain with neo-go requires a configuration file
|
||||
with contract's metadata in YAML format, like the following:
|
||||
|
||||
```
|
||||
project:
|
||||
author: Jack Smith
|
||||
email: jack@example.com
|
||||
version: 1.0
|
||||
name: 'Smart contract'
|
||||
description: 'Even smarter than Jack himself'
|
||||
hasstorage: true
|
||||
hasdynamicinvocation: false
|
||||
ispayable: false
|
||||
returntype: ByteArray
|
||||
parameters: ['String', 'Array']
|
||||
```
|
||||
|
||||
At the moment this is implemented via RPC call to the remote server.
|
||||
It's passed to the `deploy` command via `-c` option:
|
||||
|
||||
```
|
||||
$ ./bin/neo-go contract deploy -i contract.avm -c contract.yml -e http://localhost:20331 -w wallet.json -g 0.001
|
||||
```
|
||||
|
||||
Deployment works via an RPC server, an address of which is passed via `-e`
|
||||
option and should be signed using a wallet from `-w` option. More details can
|
||||
be found in `deploy` command help.
|
||||
|
||||
#### Neo Express support
|
||||
|
||||
It's possible to deploy contracts written in Go using [Neo
|
||||
Express](https://github.com/neo-project/neo-express) which is a part of [Neo
|
||||
Blockchain
|
||||
Toolkit](https://github.com/neo-project/neo-blockchain-toolkit/). To do that
|
||||
you need to generate a different metadata file using YAML written for
|
||||
deployment with neo-go. It's done in the same step with compilation via
|
||||
`--config` input parameter and `--abi` output parameter, combined with debug
|
||||
support the command line will look like this:
|
||||
|
||||
```
|
||||
$ ./bin/neo-go contract compile -i contract.go --config contract.yml -o contract.avm --debug contract.debug.json --abi contract.abi.json
|
||||
```
|
||||
|
||||
This file can then be used by toolkit to deploy contract the same way
|
||||
contracts in other languagues are deployed.
|
||||
|
||||
|
||||
### Invoking
|
||||
You can import your contract into the standalone VM and run it there (see [VM
|
||||
documentation](vm.md) for more info), but that only works for simple contracts
|
||||
that don't use blockchain a lot. For more real contracts you need to deploy
|
||||
them first and then do test invocations and regular invocations with `contract
|
||||
testinvokefunction` and `contract invokefunction` commands (or their variants,
|
||||
see `contract` command help for more details. They all work via RPC, so it's a
|
||||
mandatory parameter.
|
||||
|
||||
Example call (contract `f84d6a337fbc3d3a201d41da99e86b479e7a2554` with method
|
||||
`balanceOf` and method's parameter `AK2nJJpJr6o664CWJKi1QRXjqeic2zRp8y` using
|
||||
given RPC server and wallet and paying 0.00001 GAS for this transaction):
|
||||
|
||||
```
|
||||
$ ./bin/neo-go contract invokefunction -e http://localhost:20331 -w my_wallet.json -g 0.00001 f84d6a337fbc3d3a201d41da99e86b479e7a2554 balanceOf AK2nJJpJr6o664CWJKi1QRXjqeic2zRp8y
|
||||
```
|
||||
|
||||
## Smart contract examples
|
||||
|
||||
Some examples are provided in the [examples directory](https://github.com/nspcc-dev/neo-go/tree/master/examples).
|
||||
Some examples are provided in the [examples directory](../examples).
|
||||
|
||||
### Check if the invoker of the contract is the owning address
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ before the switch to single-node mode.
|
|||
- `docker`
|
||||
- `docker-compose`
|
||||
- `go` compiler
|
||||
|
||||
#### Instructions
|
||||
You can use existing docker-compose file located in `.docker/docker-compose.yml`:
|
||||
```bash
|
||||
|
|
205
docs/neox.md
Normal file
205
docs/neox.md
Normal file
|
@ -0,0 +1,205 @@
|
|||
# NeoGo support for neox (cross-chain Neo functionality)
|
||||
|
||||
NeoGo has full support for neox-2.x functionality integrated in the node, it
|
||||
doesn't require a separate build or code branch and it's completely controlled
|
||||
with two configuration options.
|
||||
|
||||
## What is neox
|
||||
|
||||
Neox is an extension of original Neo 2 node originally implemented in neox-2.x
|
||||
branch of C# implementation (and then presented in the 2.11.0 official
|
||||
release). It includes the following main changes:
|
||||
* local state root generation for contract storages based on MPT
|
||||
* consensus updates for state root exchange between CNs and generation of
|
||||
verified (signed by CNs) state root
|
||||
* P2P protocol updates for state root distribution
|
||||
* RPC protocol updates for state status data and proofs generation
|
||||
* two new key recovery syscalls for smart contracts
|
||||
|
||||
Most of these changes are pure extensions to Neo 2 protocol, but consensus
|
||||
changes are incompatible with regular Neo 2 nodes. The idea is that we have
|
||||
now some state reference for each block that can be used by other chains
|
||||
(along with proof paths for individual key-value pairs if needed) and at the
|
||||
same time we're able to check non-Neo signatures using new key recovery
|
||||
functionality that is available for two curves: Secp256r1 and Secp256k1.
|
||||
|
||||
### How local state is being generated and what it covers
|
||||
|
||||
Any full node processing blocks can now generate state root information
|
||||
locally using Merkle Patricia Trie (MPT). It's used for any key-value pairs
|
||||
stored in the database with prefix of `ST_Storage` which is used for contracts
|
||||
data storage. Basically, anything contracts save using `Neo.Storage.Put`
|
||||
syscall gets accounted for.
|
||||
|
||||
Each value gets a leaf node in MPT and the key for that value is encoded in
|
||||
branch and extension nodes according to prefix data. Any node in MPT can be
|
||||
hashed and the root node hash naturally depends on every other hash in the
|
||||
trie, so this single hash value represents current state of the trie and is
|
||||
called state root hash. Any change to the trie state
|
||||
(adding/deleting/changing key-value pairs) changes state root hash.
|
||||
|
||||
But even though this state root data can be computed at every full node it
|
||||
can't be considered authoritative until it's signed by network-trusted
|
||||
entities which are consensus nodes.
|
||||
|
||||
### How and why consensus process was changed in neox
|
||||
|
||||
Consensus nodes now exchange state root signatures for height N-1 during
|
||||
consensus process for block N with PrepareRequest and PrepareResponse
|
||||
messages.
|
||||
|
||||
If all goes well CNs collect enough signatures for this state root data and
|
||||
generate (and broadcast) a `stateroot` message along with regular Commit
|
||||
consensus messages. Effectively this creates another signed chain on the
|
||||
network that is always one block behind from the main chain because the
|
||||
process of block `N` creation confirms the state resulting from processing of
|
||||
block `N - 1`.
|
||||
|
||||
### How P2P protocol was changed
|
||||
|
||||
P2P protocol was extended with `getroots`, `roots` and `stateroot`
|
||||
messages for state root data exchange. Simple `stateroot` message is what
|
||||
consensus nodes generate to broadcast signed state root data, it's accepted by
|
||||
all nodes, they check it, verify its signature and save locally (to do that
|
||||
they have to have confirmed state root for the previous block). It's somewhat
|
||||
similar to block announcement, but as this message is rather small, `inv` is
|
||||
not being used.
|
||||
|
||||
But this message might get lost or some new node may join the network and want
|
||||
to get verification for its state, so there has to be some possibility for
|
||||
state root requests and replies and that's what `getroots`/`roots` pair is
|
||||
for. In general it's expected that the node would synchronize state roots the
|
||||
same way it synchronizes blocks, always trying to be up to date with the
|
||||
network. From this synchronization comes the concept of "state height" which
|
||||
represents the latest verified state root known to the node.
|
||||
|
||||
### How RPC protocol was changed
|
||||
|
||||
RPC got extended with four new methods: `getproof`, `getstateheight`,
|
||||
`getstateroot` and `verifyproof`.
|
||||
|
||||
`getstateheight` and `getstateroot` are easy, the first one allows to get
|
||||
current node's block and state heights, while the second one returns state
|
||||
root data for the specified (by index or by hash) block. State root data
|
||||
basically mirrors the one exchanged via P2P protocol (version, previous state
|
||||
root message hash and current state root hash), but also contains an
|
||||
additional flag to specify if the node has a verification (signature) for this
|
||||
state root. If the state is verified then the node also includes witness data
|
||||
for this state root which use the same format transaction's witnesses use.
|
||||
|
||||
`getproof` and `verifyproof` methods are a bit more special as they allow you
|
||||
to prove that some key-value pair exists in Neo state DB without having whole
|
||||
state DB (like when you're operating on a different chain or when you're
|
||||
working as a light node). This works via MPT path encoding from the root node
|
||||
to the particular leaf (value) node you're interested in (that contains some
|
||||
token balance for example). Using this path data it's easy to regenerate a
|
||||
part of MPT corresponding to that key-value pair locally and recalculate
|
||||
MPT hashes for that trie. If the top-level hash matches verified root hash
|
||||
then you have a proof that the key-value pair is a part of the state DB shared
|
||||
by all proper Neo nodes.
|
||||
|
||||
So `getproof` method returns this path from the root node to the given
|
||||
key. It can then be used to verify the proof locally or can be used to send
|
||||
this proof to some trusted RPC node to verify it using `verifyproof` method
|
||||
that returns value for that key in case of success.
|
||||
|
||||
### What are these new neox syscalls
|
||||
|
||||
Two syscalls were added along with other neox changes:
|
||||
"Neo.Cryptography.Secp256k1Recover" and "Neo.Cryptography.Secp256r1Recover",
|
||||
they're similar in their function and interface, but using different elliptic
|
||||
curves for their operation. The first one uses SEC-standardized Koblitz curve
|
||||
widely known for its usage in Bitcoin and the second one operates on regular
|
||||
SEC-standardized curve that is used by Neo.
|
||||
|
||||
Both of these syscalls allow to recover public key from the given signature
|
||||
(r, s) on the given message hash with a help of a flag denoting Y's least
|
||||
significant bit in decompression algorithm. The return value is a byte
|
||||
array representing recovered public key (64 bytes containing 32-byte X and Y)
|
||||
in case of success and zero-length byte array in case of failure.
|
||||
|
||||
This functionality allows you to check message signatures in smart contract,
|
||||
the key recovered can be compared with an expected one or be hashed and
|
||||
compared with an expected key hash (depending on what data is provided by the
|
||||
other blockchain).
|
||||
|
||||
## How neox is supported in NeoGo
|
||||
|
||||
NeoGo has full support for functionality outlined above. Syscalls are
|
||||
available via interop wrappers in `crypto` packages and RPC client contains
|
||||
methods to work with new RPC protocol extensions. Client-side support is
|
||||
always available, but NeoGo node's behavior is controlled by two configuration
|
||||
options: EnableStateRoot and StateRootEnableIndex, the first one is boolean
|
||||
and the second one is integer. If not specified in the configuration the first
|
||||
one has a default of false and the second has a default value of 0.
|
||||
|
||||
EnableStateRoot controls state root generation and processing
|
||||
functionality. NeoGo is able to operate both on stateroot-enabled and classic
|
||||
networks, so this is the main switch between these two modes.
|
||||
|
||||
With EnableStateRoot set to false the node works in classic mode:
|
||||
* no local state root is being generated
|
||||
* consensus process operates using classic message formats not including
|
||||
state root data
|
||||
* stateroot-related P2P messages are ignored
|
||||
* stateroot-related RPC calls are available, but always return an error
|
||||
* recovery syscalls are unavailable to contracts
|
||||
* StateRootEnableIndex setting is ignored
|
||||
|
||||
With EnableStateRoot set to true things change and the node operates with full
|
||||
neox support, but a StateRootEnableIndex setting may additionally affect its
|
||||
P2P-processing behavior. `getroots` requests for blocks with height less than
|
||||
StateRootEnableIndex are ignored, `roots` messages are only processed for
|
||||
blocks higher than StateRootEnableIndex and the node doesn't actively try to
|
||||
synchronize its state height until its block height reaches
|
||||
StateRootEnableIndex. This setting is made for network upgrades when there are
|
||||
no confirmed state roots for old blocks and they'll never be properly
|
||||
confirmed.
|
||||
|
||||
### Things you can do
|
||||
|
||||
#### Running a classic network
|
||||
|
||||
Doesn't require changing anything, just upgrade the node and run it.
|
||||
|
||||
#### Running new stateroot-enabled network
|
||||
|
||||
Setting EnableStateRoot to true and not setting StateRootEnableIndex is a good
|
||||
choice for a new private network as it gives you all the functionality from
|
||||
block zero. Note that all consensus nodes must be using this settings
|
||||
combination for successful operation.
|
||||
|
||||
#### Adding stateroot functionality to existing network
|
||||
|
||||
If you already have some network and you need it to continue working, but want
|
||||
to upgrade it with neox functionality you need to:
|
||||
* prepare a current dump of network's blocks
|
||||
* upgrade all consensus nodes with NeoGo 0.76.0+
|
||||
* stop all of them
|
||||
* change their configuration, setting EnableStateRoot to true and
|
||||
StateRootEnableIndex to some block in the future (not far away from current
|
||||
network's height)
|
||||
* remove CNs local databases
|
||||
* import blocks from the previously generated dump on all CNs
|
||||
* start all CNs
|
||||
|
||||
This can be optimized to reduce network's downtime by doing block
|
||||
dumps/restores with old CNs still running, but you have to regenerate local
|
||||
databases with stateroot enabled for correct operation.
|
||||
|
||||
### Things you shouldn't do
|
||||
|
||||
#### Randomly changing EnableStateRoot setting
|
||||
|
||||
Switching EnableStateRoot on and off without full block resynchronization may
|
||||
lead to unexpected results on any full node (independent of whether it's a
|
||||
consensus node or not) because with EnableStateRoot set to true an MPT
|
||||
structure is initialized using local DB and if that DB doesn't have correct
|
||||
MPT state it will fail. If you're changing this setting in any way --- restore
|
||||
the DB from block dump.
|
||||
|
||||
#### Running mixed consensus nodes set
|
||||
|
||||
All consensus nodes should agree on the protocol being used, either all of
|
||||
them use state roots, or all of them don't. Mixing two types of nodes will
|
||||
lead to consensus failures.
|
339
docs/notifications.md
Normal file
339
docs/notifications.md
Normal file
|
@ -0,0 +1,339 @@
|
|||
# Notification subsystem
|
||||
|
||||
Original motivation, requirements and general solution strategy are described
|
||||
in the issue #895.
|
||||
|
||||
This extension allows a websocket client to subscribe to various events and
|
||||
receive them as JSON-RPC notifications from the server.
|
||||
|
||||
## Events
|
||||
Currently supported events:
|
||||
* new block added
|
||||
Contents: block.
|
||||
Filters: none.
|
||||
* new transaction in the block
|
||||
Contents: transaction.
|
||||
Filters: type.
|
||||
* notification generated during execution
|
||||
Contents: container hash, contract script hash, stack item.
|
||||
Filters: contract script hash.
|
||||
* transaction executed
|
||||
Contents: application execution result.
|
||||
Filters: VM state.
|
||||
|
||||
## Ordering and persistence guarantees
|
||||
* new block is only announced after its processing is complete and the chain
|
||||
is updated to the new height
|
||||
* no disk-level persistence guarantees are given
|
||||
* new in-block transaction is announced after block processing, but before
|
||||
announcing the block itself
|
||||
* transaction notifications are only announced for successful transactions
|
||||
* all announcements are being done in the same order they happen on the chain
|
||||
At first transaction execution is announced, then followed by notifications
|
||||
generated during this execution, then followed by transaction announcement.
|
||||
Transaction announcements are ordered the same way they're in the block.
|
||||
* unsubscription may not cancel pending, but not yet sent events
|
||||
|
||||
## Subscription management
|
||||
|
||||
Errors are not described down below, but they can be returned as standard
|
||||
JSON-RPC errors (most often caused by invalid parameters).
|
||||
|
||||
### `subscribe` method
|
||||
|
||||
Parameters: event stream name, stream-specific filter rules hash (can be
|
||||
omitted if empty).
|
||||
|
||||
Recognized stream names:
|
||||
* `block_added`
|
||||
No filter parameters defined.
|
||||
* `transaction_added`
|
||||
Filter: `type` as a string containing standard transaction types
|
||||
(MinerTransaction, InvocationTransaction, etc)
|
||||
* `notification_from_execution`
|
||||
Filter: `contract` field containing string with hex-encoded Uint160 (LE
|
||||
representation).
|
||||
* `transaction_executed`
|
||||
Filter: `state` field containing `HALT` or `FAULT` string for successful
|
||||
and failed executions respectively.
|
||||
|
||||
Response: returns subscription ID (string) as a result. This ID can be used to
|
||||
cancel this subscription and has no meaning other than that.
|
||||
|
||||
Example request (subscribe to notifications from contract
|
||||
0x6293a440ed80a427038e175a507d3def1e04fb67 generated when executing
|
||||
transactions):
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "subscribe",
|
||||
"params": ["notification_from_execution", {"contract": "6293a440ed80a427038e175a507d3def1e04fb67"}],
|
||||
"id": 1
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Example response:
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": "55aaff00"
|
||||
}
|
||||
```
|
||||
|
||||
### `unsubscribe` method
|
||||
|
||||
Parameters: subscription ID as a string.
|
||||
|
||||
Response: boolean true.
|
||||
|
||||
Example request (unsubscribe from "55aaff00"):
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "unsubscribe",
|
||||
"params": ["55aaff00"],
|
||||
"id": 1
|
||||
}
|
||||
```
|
||||
|
||||
Example response:
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": true
|
||||
}
|
||||
```
|
||||
|
||||
## Events
|
||||
|
||||
Events are sent as JSON-RPC notifications from the server with `method` field
|
||||
being used for notification names. Notification names are identical to stream
|
||||
names described for `subscribe` method with one important addition for
|
||||
`event_missed` which can be sent for any subscription to signify that some
|
||||
events were not delivered (usually when client isn't able to keep up with
|
||||
event flow).
|
||||
|
||||
Verbose responses for various structures like blocks and transactions are used
|
||||
to simplify working with notifications on client side. Returned structures
|
||||
mostly follow the one used by standard Neo RPC calls, but may have some minor
|
||||
differences.
|
||||
|
||||
### `block_added` notification
|
||||
As a first parameter (`params` section) contains block converted to JSON
|
||||
structure which is similar to verbose `getblock` response but with the
|
||||
following differences:
|
||||
* it doesn't have `size` field (you can calculate it client-side)
|
||||
* it doesn't have `nextblockhash` field (it's supposed to be the latest one
|
||||
anyway)
|
||||
* it doesn't have `confirmations` field (see previous)
|
||||
* transactions contained don't have `net_fee` and `sys_fee` fields
|
||||
|
||||
No other parameters are sent.
|
||||
|
||||
Example:
|
||||
```
|
||||
{
|
||||
"jsonrpc" : "2.0",
|
||||
"method" : "block_added",
|
||||
"params" : [
|
||||
{
|
||||
"previousblockhash" : "0x33f3e0e24542b2ec3b6420e6881c31f6460a39a4e733d88f7557cbcc3b5ed560",
|
||||
"nextconsensus" : "AZ81H31DMWzbSnFDLFkzh9vHwaDLayV7fU",
|
||||
"index" : 205,
|
||||
"nonce" : "0000000000000457",
|
||||
"version" : 0,
|
||||
"tx" : [
|
||||
{
|
||||
"version" : 0,
|
||||
"attributes" : [],
|
||||
"txid" : "0xf9adfde059810f37b3d0686d67f6b29034e0c669537df7e59b40c14a0508b9ed",
|
||||
"size" : 10,
|
||||
"vin" : [],
|
||||
"type" : "MinerTransaction",
|
||||
"scripts" : [],
|
||||
"vout" : []
|
||||
},
|
||||
{
|
||||
"version" : 1,
|
||||
"txid" : "0x93670859cc8a42f6ea994869c944879678d33d7501d388f5a446a8c7de147df7",
|
||||
"attributes" : [],
|
||||
"size" : 60,
|
||||
"script" : "097465737476616c756507746573746b657952c103507574676f20ccfbd5f01d5b9633387428b8bab95a9e78c2",
|
||||
"vin" : [],
|
||||
"scripts" : [],
|
||||
"type" : "InvocationTransaction",
|
||||
"vout" : []
|
||||
}
|
||||
],
|
||||
"time" : 1586154525,
|
||||
"hash" : "0x48fba8aebf88278818a3dc0caecb230873d1d4ce1ea8bf473634317f94a609e5",
|
||||
"script" : {
|
||||
"invocation" : "4047a444a51218ac856f1cbc629f251c7c88187910534d6ba87847c86a9a73ed4951d203fd0a87f3e65657a7259269473896841f65c0a0c8efc79d270d917f4ff640435ee2f073c94a02f0276dfe4465037475e44e1c34c0decb87ec9c2f43edf688059fc4366a41c673d72ba772b4782c39e79f01cb981247353216d52d2df1651140527eb0dfd80a800fdd7ac8fbe68fc9366db2d71655d8ba235525a97a69a7181b1e069b82091be711c25e504a17c3c55eee6e76e6af13cb488fbe35d5c5d025c34041f39a02ebe9bb08be0e4aaa890f447dc9453209bbfb4705d8f2d869c2b55ee2d41dbec2ee476a059d77fb7c26400284328d05aece5f3168b48f1db1c6f7be0b",
|
||||
"verification" : "532102103a7f7dd016558597f7960d27c516a4394fd968b9e65155eb4b013e4040406e2102a7bc55fe8684e0119768d104ba30795bdcc86619e864add26156723ed185cd622102b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc22103d90c07df63e690ce77912e10ab51acc944b66860237b608c4f8f8309e71ee69954ae"
|
||||
},
|
||||
"merkleroot" : "0x9d922c5cfd4c8cd1da7a6b2265061998dc438bd0dea7145192e2858155e6c57a"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### `transaction_added` notification
|
||||
|
||||
The first parameter (`params` section) contains a transaction converted to
|
||||
JSON which is similar to verbose `getrawtransaction` response, but with the
|
||||
following differences:
|
||||
* `net_fee` and `sys_fee` fields are always missing
|
||||
* block's metadata is missing (`blockhash`, `confirmations`, `blocktime`)
|
||||
|
||||
No other parameters are sent.
|
||||
|
||||
Example:
|
||||
```
|
||||
{
|
||||
"params" : [
|
||||
{
|
||||
"vin" : [],
|
||||
"scripts" : [],
|
||||
"attributes" : [],
|
||||
"txid" : "0x93670859cc8a42f6ea994869c944879678d33d7501d388f5a446a8c7de147df7",
|
||||
"size" : 60,
|
||||
"vout" : [],
|
||||
"type" : "InvocationTransaction",
|
||||
"version" : 1,
|
||||
"script" : "097465737476616c756507746573746b657952c103507574676f20ccfbd5f01d5b9633387428b8bab95a9e78c2"
|
||||
}
|
||||
],
|
||||
"method" : "transaction_added",
|
||||
"jsonrpc" : "2.0"
|
||||
}
|
||||
```
|
||||
|
||||
### `notification_from_execution` notification
|
||||
|
||||
Contains three parameters: container hash (hex-encoded LE Uint256 in a
|
||||
string), contract script hash (hex-encoded LE Uint160 in a string) and stack
|
||||
item (encoded the same way as `state` field contents for notifications from
|
||||
`getapplicationlog` response).
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
{
|
||||
"method" : "notification_from_execution",
|
||||
"jsonrpc" : "2.0",
|
||||
"params" : [
|
||||
{
|
||||
"state" : {
|
||||
"value" : [
|
||||
{
|
||||
"value" : "636f6e74726163742063616c6c",
|
||||
"type" : "ByteArray"
|
||||
},
|
||||
{
|
||||
"value" : "507574",
|
||||
"type" : "ByteArray"
|
||||
},
|
||||
{
|
||||
"value" : [
|
||||
{
|
||||
"type" : "ByteArray",
|
||||
"value" : "746573746b6579"
|
||||
},
|
||||
{
|
||||
"value" : "7465737476616c7565",
|
||||
"type" : "ByteArray"
|
||||
}
|
||||
],
|
||||
"type" : "Array"
|
||||
}
|
||||
],
|
||||
"type" : "Array"
|
||||
},
|
||||
"contract" : "0xc2789e5ab9bab828743833965b1df0d5fbcc206f"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### `transaction_executed` notification
|
||||
|
||||
Contains the same result as from `getapplicationlog` method in the first
|
||||
parameter and no other parameters. One difference from `getapplicationlog` is
|
||||
that it always contains zero in the `contract` field.
|
||||
|
||||
Example:
|
||||
```
|
||||
{
|
||||
"method" : "transaction_executed",
|
||||
"params" : [
|
||||
{
|
||||
"executions" : [
|
||||
{
|
||||
"vmstate" : "HALT",
|
||||
"contract" : "0x0000000000000000000000000000000000000000",
|
||||
"notifications" : [
|
||||
{
|
||||
"state" : {
|
||||
"type" : "Array",
|
||||
"value" : [
|
||||
{
|
||||
"type" : "ByteArray",
|
||||
"value" : "636f6e74726163742063616c6c"
|
||||
},
|
||||
{
|
||||
"type" : "ByteArray",
|
||||
"value" : "507574"
|
||||
},
|
||||
{
|
||||
"value" : [
|
||||
{
|
||||
"value" : "746573746b6579",
|
||||
"type" : "ByteArray"
|
||||
},
|
||||
{
|
||||
"type" : "ByteArray",
|
||||
"value" : "7465737476616c7565"
|
||||
}
|
||||
],
|
||||
"type" : "Array"
|
||||
}
|
||||
]
|
||||
},
|
||||
"contract" : "0xc2789e5ab9bab828743833965b1df0d5fbcc206f"
|
||||
}
|
||||
],
|
||||
"gas_consumed" : "1.048",
|
||||
"stack" : [
|
||||
{
|
||||
"type" : "Integer",
|
||||
"value" : "1"
|
||||
}
|
||||
],
|
||||
"trigger" : "Application"
|
||||
}
|
||||
],
|
||||
"txid" : "0x93670859cc8a42f6ea994869c944879678d33d7501d388f5a446a8c7de147df7"
|
||||
}
|
||||
],
|
||||
"jsonrpc" : "2.0"
|
||||
}
|
||||
```
|
||||
|
||||
### `event_missed` notification
|
||||
|
||||
Never has any parameters. Example:
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "event_missed",
|
||||
"params": []
|
||||
}
|
||||
```
|
|
@ -45,6 +45,11 @@ CircleCI to build it (or them if we're to have more than one OS or
|
|||
architecture), download it and rename to `neo-go-$OS-$ARCH`, at the moment
|
||||
that should be `neo-go-linux-amd64`.
|
||||
|
||||
## Build and push image to DockerHub
|
||||
|
||||
Manually trigger "Push images to DockerHub" workflow from master branch for
|
||||
the release tag.
|
||||
|
||||
## Make a proper Github release
|
||||
|
||||
Edit an automatically-created release on Github, copy things from changelog
|
||||
|
|
343
docs/rpc.md
343
docs/rpc.md
|
@ -46,16 +46,21 @@ which would yield the response:
|
|||
| `getclaimable` |
|
||||
| `getconnectioncount` |
|
||||
| `getcontractstate` |
|
||||
| `getminimumnetworkfee` |
|
||||
| `getnep5balances` |
|
||||
| `getnep5transfers` |
|
||||
| `getpeers` |
|
||||
| `getproof` |
|
||||
| `getrawmempool` |
|
||||
| `getrawtransaction` |
|
||||
| `getstateheight` |
|
||||
| `getstateroot` |
|
||||
| `getstorage` |
|
||||
| `gettransactionheight` |
|
||||
| `gettxout` |
|
||||
| `getunclaimed` |
|
||||
| `getunspents` |
|
||||
| `getutxotransfers` |
|
||||
| `getvalidators` |
|
||||
| `getversion` |
|
||||
| `invoke` |
|
||||
|
@ -64,6 +69,87 @@ which would yield the response:
|
|||
| `sendrawtransaction` |
|
||||
| `submitblock` |
|
||||
| `validateaddress` |
|
||||
| `verifyproof` |
|
||||
|
||||
#### Implementation notices
|
||||
|
||||
##### `getaccountstate`
|
||||
|
||||
The order of assets in `balances` section may differ from the one returned by
|
||||
C# implementation. Assets can still be identified by their hashes so it
|
||||
shouldn't be an issue.
|
||||
|
||||
##### `getapplicationlog`
|
||||
|
||||
Error handling for incorrect stack items differs with C# implementation. C#
|
||||
implementation substitutes `stack` and `state` arrays with "error: recursive
|
||||
reference" string if there are any invalid items. NeoGo never does this, for
|
||||
bad `state` items it uses a byte array substitute with message "bad
|
||||
notification: ..." (may vary depending on the problem), for incorrect `stack`
|
||||
items it just omits them (still returning valid ones).
|
||||
|
||||
##### `getassetstate`
|
||||
|
||||
It returns "NEO" for NEO and "NEOGas" for GAS in the `name` field instead of
|
||||
language-aware JSON structures.
|
||||
|
||||
##### `getblock` and `getrawtransaction`
|
||||
|
||||
In their verbose outputs neo-go can omit some fields with default values for
|
||||
transactions, this includes:
|
||||
* zero "nonce" for Miner transactions (usually nonce is not zero)
|
||||
* zero "gas" for Invocation transactions (most of the time it is zero).
|
||||
|
||||
##### `getclaimable`
|
||||
|
||||
`claimable` array ordering differs, neo-go orders entries there by the
|
||||
`end_height` field, while C# implementation orders by `txid`.
|
||||
|
||||
##### `getcontractstate`
|
||||
|
||||
C# implementation doesn't return `Payable` flag in its output, neo-go has
|
||||
`is_payable` field in `properties` for that.
|
||||
|
||||
##### `getnep5transfers`
|
||||
|
||||
`received` and `sent` entries are sorted differently, C# node uses
|
||||
chronological order and neo-go uses reverse chronological order (which is
|
||||
important for paging support, see Extensions section down below).
|
||||
|
||||
##### `getrawmempool`
|
||||
|
||||
neo-go doesn't support boolean parameter to `getrawmempool` for unverified
|
||||
transactions request because neo-go actually never stores unverified
|
||||
transactions in the mempool.
|
||||
|
||||
##### `getunclaimed`
|
||||
|
||||
Numeric results are wrapped into strings in neo-go (the same way fees are
|
||||
encoded) to prevent floating point rounding errors.
|
||||
|
||||
##### `getunspents`
|
||||
|
||||
neo-go uses standard "0xhash" syntax for `txid` and `asset_hash` fields
|
||||
whereas C# module doesn't add "0x" prefix. The order of `balance` or `unspent`
|
||||
entries can differ. neo-go returns all UTXO assets while C# module only tracks
|
||||
and returns NEO and GAS.
|
||||
|
||||
##### `getutxotransfers`
|
||||
|
||||
`transactions` are sorted differently, C# node uses chronological order and
|
||||
neo-go uses reverse chronological order (which is important for paging
|
||||
support, see Extensions section down below).
|
||||
|
||||
|
||||
##### `invokefunction` and `invoke`
|
||||
|
||||
neo-go's implementation of `invokefunction` and `invoke` does not return `tx`
|
||||
field in the answer because that requires signing the transaction with some
|
||||
key in the server which doesn't fit the model of our node-client interactions.
|
||||
Lacking this signature the transaction is almost useless, so there is no point
|
||||
in returning it.
|
||||
|
||||
Both methods also don't currently support arrays in function parameters.
|
||||
|
||||
### Unsupported methods
|
||||
|
||||
|
@ -86,17 +172,258 @@ and we're not accepting issues related to them.
|
|||
| `sendmany` | Not applicable to neo-go, see `claimgas` comment |
|
||||
| `sendtoaddress` | Not applicable to neo-go, see `claimgas` comment |
|
||||
|
||||
#### Implementation notices
|
||||
### Extensions
|
||||
|
||||
##### `invokefunction` and `invoke`
|
||||
Some additional extensions are implemented as a part of this RPC server.
|
||||
|
||||
neo-go's implementation of `invokefunction` and `invoke` does not return `tx`
|
||||
field in the answer because that requires signing the transaction with some
|
||||
key in the server which doesn't fit the model of our node-client interactions.
|
||||
Lacking this signature the transaction is almost useless, so there is no point
|
||||
in returning it.
|
||||
#### Limits and paging for getnep5transfers and getutxotransfers
|
||||
|
||||
Both methods also don't currently support arrays in function parameters.
|
||||
Both `getnep5transfers` and `getutxotransfers` RPC calls never return more than
|
||||
1000 results for one request (within specified time frame). You can pass your
|
||||
own limit via an additional parameter and then use paging to request the next
|
||||
batch of transfers.
|
||||
|
||||
Example requesting 10 events for address AYC7wn4xb8SEeYpgPXHHjLr3gBuWbgAC3Q
|
||||
within 0-1600094189 timestamps:
|
||||
|
||||
```json
|
||||
{ "jsonrpc": "2.0", "id": 5, "method": "getnep5transfers", "params":
|
||||
["AYC7wn4xb8SEeYpgPXHHjLr3gBuWbgAC3Q", 0, 1600094189, 10] }
|
||||
```
|
||||
|
||||
Get the next 10 transfers for the same account within the same time frame:
|
||||
|
||||
```json
|
||||
{ "jsonrpc": "2.0", "id": 5, "method": "getnep5transfers", "params":
|
||||
["AYC7wn4xb8SEeYpgPXHHjLr3gBuWbgAC3Q", 0, 1600094189, 10, 1] }
|
||||
```
|
||||
|
||||
#### getalltransfertx call
|
||||
|
||||
In addition to regular `getnep5transfers` and `getutxotransfers` RPC calls
|
||||
`getalltransfertx` is provided to return both NEP5 and UTXO events for account
|
||||
in a single stream of events. These events are grouped by transaction and an
|
||||
additional metadata like fees is provided. It has the same parameters as
|
||||
`getnep5transfers`, but limits and paging are applied to transactions instead
|
||||
of transfer events. UTXO inputs and outputs are provided by `elements` array,
|
||||
while NEP5 transfer events are contained in `events` array.
|
||||
|
||||
Example request:
|
||||
|
||||
```json
|
||||
{ "jsonrpc": "2.0", "id": 5, "method": "getalltransfertx", "params":
|
||||
["AYC7wn4xb8SEeYpgPXHHjLr3gBuWbgAC3Q", 0, 1600094189, 2] }
|
||||
|
||||
```
|
||||
|
||||
Reply:
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc" : "2.0",
|
||||
"result" : [
|
||||
{
|
||||
"txid" : "0x1cb7e089bb52cabb35c480de9d99c41c6fea7f5a276b41d71ab3fc7c470dcb74",
|
||||
"net_fee" : "0",
|
||||
"events" : [
|
||||
{
|
||||
"asset" : "3a4acd3647086e7c44398aac0349802e6a171129",
|
||||
"type" : "send",
|
||||
"address" : "ALuZLuuDssJqG2E4foANKwbLamYHuffFjg",
|
||||
"value" : "20000000000"
|
||||
}
|
||||
],
|
||||
"sys_fee" : "0",
|
||||
"timestamp" : 1600094117,
|
||||
"block_index" : 6163114
|
||||
},
|
||||
{
|
||||
"block_index" : 6162995,
|
||||
"timestamp" : 1600092165,
|
||||
"sys_fee" : "0",
|
||||
"events" : [
|
||||
{
|
||||
"asset" : "3a4acd3647086e7c44398aac0349802e6a171129",
|
||||
"address" : "ALuZLuuDssJqG2E4foANKwbLamYHuffFjg",
|
||||
"type" : "receive",
|
||||
"value" : "20000000000"
|
||||
}
|
||||
],
|
||||
"net_fee" : "0",
|
||||
"txid" : "0xc8b45480ade5395a4a239bb44eea6d86113f32090c4854b0c4aeee1b9485edab"
|
||||
}
|
||||
],
|
||||
"id" : 5
|
||||
}
|
||||
```
|
||||
|
||||
Another request:
|
||||
|
||||
```json
|
||||
{ "jsonrpc": "2.0", "id": 5, "method": "getalltransfertx", "params":
|
||||
["AKJL9HwrFGdic9GTTXrdaHuNYa5oxqioRY", 0, 1600079056, 2, 13] }
|
||||
```
|
||||
|
||||
Reply:
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc" : "2.0",
|
||||
"id" : 5,
|
||||
"result" : [
|
||||
{
|
||||
"elements" : [
|
||||
{
|
||||
"asset" : "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv",
|
||||
"value" : "0.00000831",
|
||||
"type" : "input"
|
||||
},
|
||||
{
|
||||
"value" : "0.0000083",
|
||||
"type" : "output",
|
||||
"asset" : "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv"
|
||||
}
|
||||
],
|
||||
"events" : [
|
||||
{
|
||||
"asset" : "1578103c13e39df15d0d29826d957e85d770d8c9",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv",
|
||||
"type" : "receive",
|
||||
"value" : "2380844141430"
|
||||
}
|
||||
],
|
||||
"timestamp" : 1561566911,
|
||||
"net_fee" : "0.00000001",
|
||||
"block_index" : 3929554,
|
||||
"sys_fee" : "0",
|
||||
"txid" : "0xb4f1bdb466d8bd3524502008a0bc1f9342356b4eea67be19d384845c670442a6"
|
||||
},
|
||||
{
|
||||
"txid" : "0xc045c0612b34218b7e5eaee973114af3eff925f859adf23cf953930f667cdc93",
|
||||
"sys_fee" : "0",
|
||||
"block_index" : 3929523,
|
||||
"net_fee" : "0.00000001",
|
||||
"timestamp" : 1561566300,
|
||||
"events" : [
|
||||
{
|
||||
"asset" : "1578103c13e39df15d0d29826d957e85d770d8c9",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv",
|
||||
"type" : "receive",
|
||||
"value" : "2100000000"
|
||||
}
|
||||
],
|
||||
"elements" : [
|
||||
{
|
||||
"asset" : "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv",
|
||||
"type" : "input",
|
||||
"value" : "0.00000838"
|
||||
},
|
||||
{
|
||||
"value" : "0.00000837",
|
||||
"type" : "output",
|
||||
"address" : "AZCcft1uYtmZXxzHPr5tY7L6M85zG7Dsrv",
|
||||
"asset" : "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### getblocktransfertx call
|
||||
|
||||
`getblocktransfertx` provides a list of transactions that did some asset
|
||||
transfers in a block (either UTXO or NEP5). It gets a block number or hash as
|
||||
a single parameter and its output format is similar to `getalltransfertx`
|
||||
except for `events` where it doesn't use `address` and `type` fields, but
|
||||
rather provides `from` and `to` (meaning that the asset was moved from `from`
|
||||
to `to` address).
|
||||
|
||||
Example request:
|
||||
|
||||
```json
|
||||
{ "jsonrpc": "2.0", "id": 5, "method": "getblocktransfertx", "params": [6000003]}
|
||||
|
||||
```
|
||||
|
||||
Reply:
|
||||
```json
|
||||
{
|
||||
"id" : 5,
|
||||
"result" : [
|
||||
{
|
||||
"txid" : "0xaec0994211e5d7fd459a4445b113db0102ac79cb90a08b3211b9a9190a6feaa3",
|
||||
"elements" : [
|
||||
{
|
||||
"asset" : "602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7",
|
||||
"type" : "output",
|
||||
"value" : "0.19479178",
|
||||
"address" : "AHwyehUHV8ujVJBN6Tz3jBDuPAHQ1wKU5R"
|
||||
}
|
||||
],
|
||||
"block_index" : 6000003,
|
||||
"timestamp" : 1597295221,
|
||||
"sys_fee" : "0",
|
||||
"net_fee" : "0"
|
||||
},
|
||||
{
|
||||
"sys_fee" : "0",
|
||||
"net_fee" : "0",
|
||||
"elements" : [
|
||||
{
|
||||
"value" : "971",
|
||||
"address" : "AHFvPbmMbxnD6EQQWcope8VWKEMDtG1qTQ",
|
||||
"asset" : "c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b",
|
||||
"type" : "input"
|
||||
},
|
||||
{
|
||||
"address" : "AP18zgg58bK6vZ7MX51XfD63eEEuqKCgJt",
|
||||
"value" : "971",
|
||||
"asset" : "c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b",
|
||||
"type" : "output"
|
||||
}
|
||||
],
|
||||
"block_index" : 6000003,
|
||||
"txid" : "0x6b0888b10b1150d301f749d56b7365b307d814cfd843bd064e68313bb30c9351",
|
||||
"timestamp" : 1597295221
|
||||
},
|
||||
{
|
||||
"sys_fee" : "0",
|
||||
"net_fee" : "0",
|
||||
"block_index" : 6000003,
|
||||
"txid" : "0x6b2220834059710aecfe4b2cbdb56311bbb27ac5d94795c041b5a2e6fb76f96e",
|
||||
"timestamp" : 1597295221,
|
||||
"events" : [
|
||||
{
|
||||
"from" : "AeNAPrVp7ZWtYLaAWvZ3gkKQsJBZUJJz3r",
|
||||
"asset" : "b951ecbbc5fe37a9c280a76cb0ce0014827294cf",
|
||||
"to" : "AVkhaHaxLaboUVFD1Rke5abTJuKAqziCkY",
|
||||
"value" : "69061428"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"jsonrpc" : "2.0"
|
||||
}
|
||||
```
|
||||
|
||||
#### Websocket server
|
||||
|
||||
This server accepts websocket connections on `ws://$BASE_URL/ws` address. You
|
||||
can use it to perform regular RPC calls over websockets (it's supposed to be a
|
||||
little faster than going regular HTTP route) and you can also use it for
|
||||
additional functionality provided only via websockets (like notifications).
|
||||
|
||||
#### Notification subsystem
|
||||
|
||||
Notification subsystem consists of two additional RPC methods (`subscribe` and
|
||||
`unsubscribe` working only over websocket connection) that allow clients to subscribe
|
||||
to various blockchain events (with simple event filtering) and receive them on
|
||||
the client as JSON-RPC notifications. More details on that are written in the
|
||||
[notifications specification](notifications.md).
|
||||
|
||||
## Reference
|
||||
|
||||
|
|
523
docs/runtime.md
523
docs/runtime.md
|
@ -1,523 +0,0 @@
|
|||
# Runtime
|
||||
A brief overview of NEO smart contract API's that can be used in the neo-go framework.
|
||||
|
||||
# Overview
|
||||
1. [Account]()
|
||||
2. [Asset]()
|
||||
3. [Attribute]()
|
||||
4. [Block]()
|
||||
5. [Blockchain]()
|
||||
6. [Contract]()
|
||||
7. [Crypto]()
|
||||
8. [Engine]()
|
||||
9. [Enumerator]()
|
||||
10. [Iterator]()
|
||||
11. [Header]()
|
||||
12. [Input]()
|
||||
13. [Output]()
|
||||
14. [Runtime]()
|
||||
15. [Storage]()
|
||||
16. [Transaction]()
|
||||
17. [Util]()
|
||||
|
||||
## Account
|
||||
#### GetScriptHash
|
||||
```
|
||||
GetScriptHash(a Account) []byte
|
||||
```
|
||||
Returns the script hash of the given account.
|
||||
|
||||
#### GetVotes
|
||||
```
|
||||
GetVotes(a Account) [][]byte
|
||||
```
|
||||
Returns the votes (a slice of public keys) of the given account.
|
||||
|
||||
#### GetBalance
|
||||
```
|
||||
GetBalance(a Account, assetID []byte) int
|
||||
```
|
||||
Returns the balance of the given asset id for the given account.
|
||||
|
||||
## Asset
|
||||
#### GetAssetID
|
||||
```
|
||||
GetAssetID(a Asset) []byte
|
||||
```
|
||||
Returns the id of the given asset.
|
||||
|
||||
#### GetAmount
|
||||
```
|
||||
GetAmount(a Asset) int
|
||||
```
|
||||
Returns the amount of the given asset id.
|
||||
|
||||
#### GetAvailable
|
||||
```
|
||||
GetAvailable(a Asset) int
|
||||
```
|
||||
Returns the available amount of the given asset.
|
||||
|
||||
#### GetPrecision
|
||||
```
|
||||
GetPrecision(a Asset) byte
|
||||
```
|
||||
Returns the precision of the given Asset.
|
||||
|
||||
#### GetOwner
|
||||
```
|
||||
GetOwner(a Asset) []byte
|
||||
```
|
||||
Returns the owner of the given asset.
|
||||
|
||||
#### GetAdmin
|
||||
```
|
||||
GetAdmin(a Asset) []byte
|
||||
```
|
||||
Returns the admin of the given asset.
|
||||
|
||||
#### GetIssuer
|
||||
```
|
||||
GetIssuer(a Asset) []byte
|
||||
```
|
||||
Returns the issuer of the given asset.
|
||||
|
||||
#### Create
|
||||
```
|
||||
Create(type byte, name string, amount int, precision byte, owner, admin, issuer []byte)
|
||||
```
|
||||
Creates a new asset on the blockchain.
|
||||
|
||||
#### Renew
|
||||
```
|
||||
Renew(asset Asset, years int)
|
||||
```
|
||||
Renews the given asset for the given number of years.
|
||||
|
||||
## Attribute
|
||||
#### GetUsage
|
||||
```
|
||||
GetUsage(attr Attribute) []byte
|
||||
```
|
||||
Returns the usage of the given attribute.
|
||||
|
||||
#### GetData
|
||||
```
|
||||
GetData(attr Attribute) []byte
|
||||
```
|
||||
Returns the data of the given attribute.
|
||||
|
||||
## Block
|
||||
#### GetTransactionCount
|
||||
```
|
||||
GetTransactionCount(b Block) int
|
||||
```
|
||||
Returns the number of transactions that are recorded in the given block.
|
||||
|
||||
#### GetTransactions
|
||||
```
|
||||
GetTransactions(b Block) []transaction.Transaction
|
||||
```
|
||||
Returns a slice of the transactions that are recorded in the given block.
|
||||
|
||||
#### GetTransaction
|
||||
```
|
||||
GetTransaction(b Block, hash []byte) transaction.Transaction
|
||||
```
|
||||
Returns the transaction by the given hash that is recorded in the given block.
|
||||
|
||||
## Blockchain
|
||||
#### GetHeight
|
||||
```
|
||||
GetHeight() int
|
||||
```
|
||||
Returns the current height of the blockchain.
|
||||
|
||||
#### GetHeader
|
||||
```
|
||||
GetHeader(heightOrHash interface{}) header.Header
|
||||
```
|
||||
Return the header by the given hash or index.
|
||||
|
||||
#### GetBlock
|
||||
```
|
||||
GetBlock(heightOrHash interface{}) block.Block
|
||||
```
|
||||
Returns the block by the given hash or index.
|
||||
|
||||
#### GetTransaction
|
||||
```
|
||||
GetTransaction(hash []byte) transaction.Transaction
|
||||
```
|
||||
Returns a transaction by the given hash.
|
||||
|
||||
#### GetContract
|
||||
```
|
||||
GetContract(scriptHash []byte) contract.Contract
|
||||
```
|
||||
Returns the contract found by the given script hash.
|
||||
|
||||
#### GetAccount
|
||||
```
|
||||
GetAccount(scriptHash []byte) account.Account
|
||||
```
|
||||
Returns the account found by the given script hash.
|
||||
|
||||
#### GetValidators
|
||||
```
|
||||
GetValidators() [][]byte
|
||||
```
|
||||
Returns a list of validators public keys.
|
||||
|
||||
#### GetAsset
|
||||
```
|
||||
GetAsset(assetID []byte) asset.Asset
|
||||
```
|
||||
Returns the asset found by the given asset id.
|
||||
|
||||
## Contract
|
||||
#### GetScript
|
||||
```
|
||||
GetScript(c Contract) []byte
|
||||
```
|
||||
Return the script of the given contract.
|
||||
|
||||
#### IsPayable
|
||||
```
|
||||
IsPayable(c Contract) bool
|
||||
```
|
||||
Returns whether the given contract is payable.
|
||||
|
||||
#### GetStorageContext
|
||||
```
|
||||
GetStorageContext(c Contract)
|
||||
```
|
||||
Returns the storage context of the given contract.
|
||||
|
||||
#### Create
|
||||
```
|
||||
Create(
|
||||
script []byte,
|
||||
params []interface{},
|
||||
returnType byte,
|
||||
properties interface{},
|
||||
name,
|
||||
version,
|
||||
author,
|
||||
email,
|
||||
description string)
|
||||
```
|
||||
Creates a new contract on the blockchain.
|
||||
|
||||
#### Migrate
|
||||
```
|
||||
Migrate(
|
||||
script []byte,
|
||||
params []interface{},
|
||||
returnType byte,
|
||||
properties interface{},
|
||||
name,
|
||||
version,
|
||||
author,
|
||||
email,
|
||||
description string)
|
||||
```
|
||||
Migrates a contract on the blockchain.
|
||||
|
||||
#### Destroy
|
||||
```
|
||||
Destroy(c Contract)
|
||||
```
|
||||
Deletes the given contract from the blockchain.
|
||||
|
||||
## Crypto
|
||||
#### SHA1
|
||||
```
|
||||
SHA1(data []byte) []byte
|
||||
```
|
||||
Computes the sha1 hash of the given bytes
|
||||
|
||||
#### SHA256
|
||||
```
|
||||
SHA256(data []byte) []byte
|
||||
```
|
||||
Computes the sha256 hash of the given bytes
|
||||
|
||||
#### Hash256
|
||||
```
|
||||
Hash256(data []byte) []byte
|
||||
```
|
||||
Computes the sha256^2 of the given data.
|
||||
|
||||
#### Hash160
|
||||
```
|
||||
Hash160(data []byte) []byte
|
||||
```
|
||||
Computes the ripemd160 over the sha256 hash of the given data.
|
||||
|
||||
## Engine
|
||||
#### GetScriptContainer
|
||||
```
|
||||
GetScriptContainer() transaction.Transaction
|
||||
```
|
||||
Returns the transaction that is in the context of the VM execution.
|
||||
|
||||
#### GetExecutingScriptHash
|
||||
```
|
||||
GetExecutingScriptHash() []byte
|
||||
```
|
||||
Returns the script hash of the contract that is currently being executed.
|
||||
|
||||
#### GetCallingScriptHash
|
||||
```
|
||||
GetCallingScriptHash() []byte
|
||||
```
|
||||
Returns the script hash of the contract that has started the execution of the current script.
|
||||
|
||||
#### GetEntryScriptHash
|
||||
```
|
||||
GetEntryScriptHash() []byte
|
||||
```
|
||||
Returns the script hash of the contract that started the execution from the start.
|
||||
|
||||
## Enumerator
|
||||
#### Create
|
||||
```
|
||||
Create(items []interface{}) Enumerator
|
||||
```
|
||||
Creates an enumerator from the given items.
|
||||
|
||||
#### Next
|
||||
```
|
||||
Next(e Enumerator) interface{}
|
||||
```
|
||||
Returns the next item from the given enumerator.
|
||||
|
||||
#### Value
|
||||
```
|
||||
Value(e Enumerator) interface{}
|
||||
```
|
||||
Returns the enumerator value.
|
||||
|
||||
## Iterator
|
||||
#### Create
|
||||
```
|
||||
Create(items []interface{}) Iterator
|
||||
```
|
||||
Creates an iterator from the given items.
|
||||
|
||||
#### Key
|
||||
```
|
||||
Key(it Iterator) interface{}
|
||||
```
|
||||
Returns the key from the given iterator.
|
||||
|
||||
#### Keys
|
||||
```
|
||||
Keys(it Iterator) []interface{}
|
||||
```
|
||||
Returns the iterator's keys
|
||||
|
||||
#### Values
|
||||
```
|
||||
Values(it Iterator) []interface{}
|
||||
```
|
||||
Returns the iterator's values
|
||||
|
||||
## Header
|
||||
#### GetIndex
|
||||
```
|
||||
GetIndex(h Header) int
|
||||
```
|
||||
Returns the height of the given header.
|
||||
|
||||
#### GetHash
|
||||
```
|
||||
GetHash(h Header) []byte
|
||||
```
|
||||
Returns the hash of the given header.
|
||||
|
||||
#### GetPrevHash
|
||||
```
|
||||
GetPrevHash(h Header) []byte
|
||||
```
|
||||
Returns the previous hash of the given header.
|
||||
|
||||
#### GetTimestamp
|
||||
```
|
||||
GetTimestamp(h Header) int
|
||||
```
|
||||
Returns the timestamp of the given header.
|
||||
|
||||
#### GetVersion
|
||||
```
|
||||
GetVersion(h Header) int
|
||||
```
|
||||
Returns the version of the given header.
|
||||
|
||||
#### GetMerkleRoot
|
||||
```
|
||||
GetMerkleRoot(h Header) []byte
|
||||
```
|
||||
Returns the merkle root of the given header.
|
||||
|
||||
#### GetConsensusData
|
||||
```
|
||||
GetConsensusData(h Header) int
|
||||
```
|
||||
Returns the consensus data of the given header.
|
||||
|
||||
#### GetNextConsensus
|
||||
```
|
||||
GetNextConsensus(h Header) []byte
|
||||
```
|
||||
Returns the next consensus of the given header.
|
||||
|
||||
## Input
|
||||
#### GetHash
|
||||
```
|
||||
GetHash(in Input) []byte
|
||||
```
|
||||
Returns the hash field of the given input.
|
||||
|
||||
#### GetIndex
|
||||
```
|
||||
GetIndex(in Input) int
|
||||
```
|
||||
Returns the index field of the given input.
|
||||
|
||||
## Output
|
||||
#### GetAssetID
|
||||
```
|
||||
GetAssetID(out Output) []byte
|
||||
```
|
||||
Returns the asset id field of the given output.
|
||||
|
||||
#### GetValue
|
||||
```
|
||||
GetValue(out Output) int
|
||||
```
|
||||
Returns the value field of the given output.
|
||||
|
||||
#### GetScriptHash
|
||||
```
|
||||
GetScriptHash(out Output) []byte
|
||||
```
|
||||
Returns the script hash field of the given output.
|
||||
|
||||
## Runtime
|
||||
#### CheckWitness
|
||||
```
|
||||
CheckWitness(hash []byte) bool
|
||||
```
|
||||
Verifies if the given hash is the hash of the contract owner.
|
||||
|
||||
#### Log
|
||||
```
|
||||
Log(message string)
|
||||
```
|
||||
Logs the given message.
|
||||
|
||||
#### Notify
|
||||
```
|
||||
Notify(args ...interface{}) int
|
||||
```
|
||||
Notifies the VM with any number of arguments.
|
||||
|
||||
#### GetTime
|
||||
```
|
||||
GetTime() int
|
||||
```
|
||||
Returns the current time based on the highest block in the chain.
|
||||
|
||||
#### GetTrigger
|
||||
```
|
||||
GetTrigger() byte
|
||||
```
|
||||
Returns the trigger type of the execution.
|
||||
|
||||
#### Serialize
|
||||
```
|
||||
Serialize(item interface{}) []byte
|
||||
```
|
||||
Serialize the given stack item to a slice of bytes.
|
||||
|
||||
#### Deserialize
|
||||
```
|
||||
Deserialize(data []byte) interface{}
|
||||
```
|
||||
Deserializes the given data to a stack item.
|
||||
|
||||
## Storage
|
||||
#### GetContext
|
||||
```
|
||||
GetContext() Context
|
||||
```
|
||||
Returns the current storage context.
|
||||
|
||||
#### Put
|
||||
```
|
||||
Put(ctx Context, key, value []interface{})
|
||||
```
|
||||
Stores the given value at the given key.
|
||||
|
||||
#### Get
|
||||
```
|
||||
Get(ctx Context, key interface{}) interface{}
|
||||
```
|
||||
Returns the value found at the given key.
|
||||
|
||||
#### Delete
|
||||
```
|
||||
Delete(ctx Context, key interface{})
|
||||
```
|
||||
Deletes the given key from storage.
|
||||
|
||||
#### Find
|
||||
```
|
||||
Find(ctx Context, key interface{}) iterator.Iterator
|
||||
```
|
||||
Find returns an iterator over the key-value pairs that match the given key.
|
||||
|
||||
## Transaction
|
||||
#### GetHash
|
||||
```
|
||||
GetHash(t Transaction) []byte
|
||||
```
|
||||
Returns the hash for the given transaction.
|
||||
|
||||
#### GetType
|
||||
```
|
||||
GetType(t Transaction) byte
|
||||
```
|
||||
Returns the type of the given transaction.
|
||||
|
||||
#### GetAttributes
|
||||
```
|
||||
GetAttributes(t Transaction) []attribute.Attribute
|
||||
```
|
||||
Returns the attributes of the given transaction.
|
||||
|
||||
#### GetReferences
|
||||
```
|
||||
GetReferences(t Transaction) interface{}
|
||||
```
|
||||
Returns the references of the given transaction.
|
||||
|
||||
#### GetUnspentCoins
|
||||
```
|
||||
GetUnspentCoins(t Transaction) interface{}
|
||||
```
|
||||
Returns the unspent coins of the given transaction.
|
||||
|
||||
#### GetOutputs
|
||||
```
|
||||
GetOutputs(t Transaction) []output.Output
|
||||
```
|
||||
Returns the outputs of the given transaction.
|
||||
|
||||
#### GetInputs
|
||||
```
|
||||
GetInputs(t Transaction) []input.Input
|
||||
```
|
||||
Returns the inputs of the given transaction.
|
|
@ -58,7 +58,7 @@ func (t Token) Transfer(ctx storage.Context, from []byte, to []byte, amount int)
|
|||
|
||||
// CanTransfer returns the amount it can transfer
|
||||
func (t Token) CanTransfer(ctx storage.Context, from []byte, to []byte, amount int) int {
|
||||
if len(to) != 20 && !IsUsableAddress(from) {
|
||||
if len(to) != 20 || !IsUsableAddress(from) {
|
||||
return -1
|
||||
}
|
||||
|
||||
|
|
9
go.mod
9
go.mod
|
@ -3,10 +3,12 @@ module github.com/nspcc-dev/neo-go
|
|||
require (
|
||||
github.com/Workiva/go-datastructures v1.0.50
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible
|
||||
github.com/btcsuite/btcd v0.20.1-beta
|
||||
github.com/dgraph-io/badger/v2 v2.0.3
|
||||
github.com/go-redis/redis v6.10.2+incompatible
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/mr-tron/base58 v1.1.2
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200303183127-36d3da79c682
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200925163137-8f3b9ab3b720
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v1.2.1
|
||||
|
@ -21,6 +23,7 @@ require (
|
|||
golang.org/x/text v0.3.0
|
||||
golang.org/x/tools v0.0.0-20180318012157-96caea41033d
|
||||
gopkg.in/abiosoft/ishell.v2 v2.0.0
|
||||
gopkg.in/yaml.v2 v2.2.4
|
||||
)
|
||||
|
||||
go 1.12
|
||||
go 1.13
|
||||
|
|
106
go.sum
106
go.sum
|
@ -1,27 +1,43 @@
|
|||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1:MJCkWUBhi9pn/CrYO1Q3P687y2KeahrOPS9BD9LDGb0=
|
||||
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig=
|
||||
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I=
|
||||
github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA=
|
||||
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo=
|
||||
github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
|
||||
github.com/abiosoft/ishell v2.0.0+incompatible h1:zpwIuEHc37EzrsIYah3cpevrIc8Oma7oZPxr03tlmmw=
|
||||
github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8=
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
|
@ -29,11 +45,22 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
|
|||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
|
@ -48,15 +75,12 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
|||
github.com/go-redis/redis v6.10.2+incompatible h1:SLbqrO/Ik1nhXA5/cbEs1P5MUBo1Qq4ihlNfGnnipPw=
|
||||
github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
|
@ -65,25 +89,37 @@ github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp
|
|||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
|
@ -91,34 +127,28 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
|||
github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254 h1:A4OkQDQOSPsJF8qUmqNvFDzmIGALrvOCZrMktllDoKc=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254/go.mod h1:w1Ln2aT+dBlPhLnuZhBV+DfPEdS2CHWWLp5JTScY3bw=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae h1:T5V1QANlNMKun0EPB3eqg2PTXG4rmLhzDyEiV63kdB0=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae/go.mod h1:3FjXOoHmA51EGfb5GS/HOv7VdmngNRTssSeQ729dvGY=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a h1:ajvxgEe9qY4vvoSmrADqdDx7hReodKTnT2IXN++qZG8=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1 h1:yEx9WznS+rjE0jl0dLujCxuZSIb+UTjF+005TJu/nNI=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200303183127-36d3da79c682 h1:63OWUolW4GcjJR7cThq8hLnMLTwL+sjO3Qf4fo4sx8w=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200303183127-36d3da79c682/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200925163137-8f3b9ab3b720 h1:e/lFQUIPnWErf2yiHjp2HyPhf0nyo3lp4hMm8IlQX1U=
|
||||
github.com/nspcc-dev/dbft v0.0.0-20200925163137-8f3b9ab3b720/go.mod h1:I5D0W3tu3epdt2RMCTxS//HDr4S+OHRqajouQTOAHI8=
|
||||
github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.0 h1:ftN+59WqxSWz/RCgXYOfhmltOOqU+udsNQSvN6wkFck=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.3 h1:aca3X2aly92ENRbFK+kH6Hd+J9EQ4Eu6XMVoITSIKtc=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.3/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
|
||||
github.com/nspcc-dev/rfc6979 v0.1.0 h1:Lwg7esRRoyK1Up/IN1vAef1EmvrBeMHeeEkek2fAJ6c=
|
||||
github.com/nspcc-dev/rfc6979 v0.1.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
|
||||
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -139,27 +169,33 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
|
|||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73 h1:I2drr5K0tykBofr74ZEGliE/Hf6fNkEbcPyFvsy7wZk=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7 h1:Y17pEjKgx2X0A69WQPGa8hx/Myzu+4NdUxlkZpbAYio=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
|
@ -169,36 +205,34 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
|||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -208,17 +242,15 @@ golang.org/x/tools v0.0.0-20180318012157-96caea41033d h1:Xmo0nLTRYewf0eXDvo12nMS
|
|||
golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gopkg.in/abiosoft/ishell.v2 v2.0.0 h1:/J5yh3nWYSSGFjALcitTI9CLE0Tu27vBYHX0srotqOc=
|
||||
gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
|
|
@ -16,7 +16,8 @@ var (
|
|||
"SHA1", "Hash256", "Hash160",
|
||||
"VerifySignature", "AppCall",
|
||||
"FromAddress", "Equals",
|
||||
"panic",
|
||||
"panic", "DynAppCall",
|
||||
"delete", "Remove",
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -43,8 +44,9 @@ func typeAndValueForField(fld *types.Var) (types.TypeAndValue, error) {
|
|||
default:
|
||||
return types.TypeAndValue{}, fmt.Errorf("could not initialize struct field %s to zero, type: %s", fld.Name(), t)
|
||||
}
|
||||
default:
|
||||
return types.TypeAndValue{Type: t}, nil
|
||||
}
|
||||
return types.TypeAndValue{}, nil
|
||||
}
|
||||
|
||||
// countGlobals counts the global variables in the program to add
|
||||
|
@ -133,16 +135,13 @@ func (f funcUsage) funcUsed(name string) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
// hasReturnStmt looks if the given FuncDecl has a return statement.
|
||||
func hasReturnStmt(decl ast.Node) (b bool) {
|
||||
ast.Inspect(decl, func(node ast.Node) bool {
|
||||
if _, ok := node.(*ast.ReturnStmt); ok {
|
||||
b = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
// lastStmtIsReturn checks if last statement of the declaration was return statement..
|
||||
func lastStmtIsReturn(decl *ast.FuncDecl) (b bool) {
|
||||
if l := len(decl.Body.List); l != 0 {
|
||||
_, ok := decl.Body.List[l-1].(*ast.ReturnStmt)
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func analyzeFuncUsage(pkgs map[*types.Package]*loader.PackageInfo) funcUsage {
|
||||
|
|
|
@ -249,9 +249,10 @@ func (c *codegen) convertFuncDecl(file ast.Node, decl *ast.FuncDecl) {
|
|||
|
||||
// Load the arguments in scope.
|
||||
for _, arg := range decl.Type.Params.List {
|
||||
name := arg.Names[0].Name // for now.
|
||||
l := c.scope.newLocal(name)
|
||||
c.emitStoreLocal(l)
|
||||
for _, id := range arg.Names {
|
||||
l := c.scope.newLocal(id.Name)
|
||||
c.emitStoreLocal(l)
|
||||
}
|
||||
}
|
||||
// Load in all the global variables in to the scope of the function.
|
||||
// This is not necessary for syscalls.
|
||||
|
@ -261,8 +262,10 @@ func (c *codegen) convertFuncDecl(file ast.Node, decl *ast.FuncDecl) {
|
|||
|
||||
ast.Walk(c, decl.Body)
|
||||
|
||||
// If this function returns the void (no return stmt) we will cleanup its junk on the stack.
|
||||
if !hasReturnStmt(decl) {
|
||||
// If we have reached the end of the function without encountering `return` statement,
|
||||
// we should clean alt.stack manually.
|
||||
// This can be the case with void and named-return functions.
|
||||
if !lastStmtIsReturn(decl) {
|
||||
c.saveSequencePoint(decl.Body)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.FROMALTSTACK)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.DROP)
|
||||
|
@ -314,38 +317,36 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
case *ast.AssignStmt:
|
||||
multiRet := len(n.Rhs) != len(n.Lhs)
|
||||
c.saveSequencePoint(n)
|
||||
// Assign operations are grouped https://github.com/golang/go/blob/master/src/go/types/stmt.go#L160
|
||||
isAssignOp := token.ADD_ASSIGN <= n.Tok && n.Tok <= token.AND_NOT_ASSIGN
|
||||
if isAssignOp {
|
||||
// RHS can contain exactly one expression, thus there is no need to iterate.
|
||||
ast.Walk(c, n.Lhs[0])
|
||||
ast.Walk(c, n.Rhs[0])
|
||||
c.convertToken(n.Tok)
|
||||
}
|
||||
for i := 0; i < len(n.Lhs); i++ {
|
||||
switch t := n.Lhs[i].(type) {
|
||||
case *ast.Ident:
|
||||
switch n.Tok {
|
||||
case token.ADD_ASSIGN, token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN, token.REM_ASSIGN:
|
||||
c.emitLoadLocal(t.Name)
|
||||
ast.Walk(c, n.Rhs[0]) // can only add assign to 1 expr on the RHS
|
||||
c.convertToken(n.Tok)
|
||||
if n.Tok == token.DEFINE && !multiRet {
|
||||
c.registerDebugVariable(t.Name, n.Rhs[i])
|
||||
}
|
||||
if !isAssignOp && (i == 0 || !multiRet) {
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
}
|
||||
if t.Name == "_" {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.DROP)
|
||||
} else {
|
||||
l := c.scope.loadLocal(t.Name)
|
||||
c.emitStoreLocal(l)
|
||||
case token.DEFINE:
|
||||
if !multiRet {
|
||||
c.registerDebugVariable(t.Name, n.Rhs[i])
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
if i == 0 || !multiRet {
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
}
|
||||
|
||||
if t.Name == "_" {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.DROP)
|
||||
} else {
|
||||
l := c.scope.loadLocal(t.Name)
|
||||
c.emitStoreLocal(l)
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
switch expr := t.X.(type) {
|
||||
case *ast.Ident:
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
if !isAssignOp {
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
}
|
||||
typ := c.typeInfo.ObjectOf(expr).Type().Underlying()
|
||||
if strct, ok := typ.(*types.Struct); ok {
|
||||
c.emitLoadLocal(expr.Name) // load the struct
|
||||
|
@ -360,7 +361,9 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
// Assignments to index expressions.
|
||||
// slice[0] = 10
|
||||
case *ast.IndexExpr:
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
if !isAssignOp {
|
||||
ast.Walk(c, n.Rhs[i])
|
||||
}
|
||||
name := t.X.(*ast.Ident).Name
|
||||
c.emitLoadLocal(name)
|
||||
switch ind := t.Index.(type) {
|
||||
|
@ -417,9 +420,22 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
}
|
||||
c.dropItems(cnt)
|
||||
|
||||
// first result should be on top of the stack
|
||||
for i := len(n.Results) - 1; i >= 0; i-- {
|
||||
ast.Walk(c, n.Results[i])
|
||||
if len(n.Results) == 0 {
|
||||
results := c.scope.decl.Type.Results
|
||||
if results.NumFields() != 0 {
|
||||
// function with named returns
|
||||
for i := len(results.List) - 1; i >= 0; i-- {
|
||||
names := results.List[i].Names
|
||||
for j := len(names) - 1; j >= 0; j-- {
|
||||
c.emitLoadLocal(names[j].Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// first result should be on top of the stack
|
||||
for i := len(n.Results) - 1; i >= 0; i-- {
|
||||
ast.Walk(c, n.Results[i])
|
||||
}
|
||||
}
|
||||
|
||||
c.saveSequencePoint(n)
|
||||
|
@ -454,7 +470,6 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
case *ast.SwitchStmt:
|
||||
ast.Walk(c, n.Tag)
|
||||
|
||||
eqOpcode := c.getEqualityOpcode(n.Tag)
|
||||
switchEnd, label := c.generateLabel(labelEnd)
|
||||
|
||||
lastSwitch := c.currentSwitch
|
||||
|
@ -474,7 +489,7 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
for j := range cc.List {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.DUP)
|
||||
ast.Walk(c, cc.List[j])
|
||||
emit.Opcode(c.prog.BinWriter, eqOpcode)
|
||||
c.emitEquality(n.Tag, token.EQL)
|
||||
if j == l-1 {
|
||||
emit.Jmp(c.prog.BinWriter, opcode.JMPIFNOT, lEnd)
|
||||
} else {
|
||||
|
@ -517,6 +532,8 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
c.emitLoadConst(value)
|
||||
} else if tv := c.typeInfo.Types[n]; tv.Value != nil {
|
||||
c.emitLoadConst(tv)
|
||||
} else if n.Name == "nil" {
|
||||
c.emitDefault(new(types.Slice))
|
||||
} else {
|
||||
c.emitLoadLocal(n.Name)
|
||||
}
|
||||
|
@ -599,26 +616,20 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
|
|||
ast.Walk(c, n.X)
|
||||
ast.Walk(c, n.Y)
|
||||
|
||||
switch {
|
||||
case n.Op == token.ADD:
|
||||
switch n.Op {
|
||||
case token.ADD:
|
||||
// VM has separate opcodes for number and string concatenation
|
||||
if isStringType(tinfo.Type) {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.CAT)
|
||||
} else {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.ADD)
|
||||
}
|
||||
case n.Op == token.EQL:
|
||||
// VM has separate opcodes for number and string equality
|
||||
op := c.getEqualityOpcode(n.X)
|
||||
emit.Opcode(c.prog.BinWriter, op)
|
||||
case n.Op == token.NEQ:
|
||||
// VM has separate opcodes for number and string equality
|
||||
if isStringType(c.typeInfo.Types[n.X].Type) {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.EQUAL)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NOT)
|
||||
} else {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NUMNOTEQUAL)
|
||||
case token.EQL, token.NEQ:
|
||||
if isExprNil(n.X) || isExprNil(n.Y) {
|
||||
c.prog.Err = errors.New("comparison with `nil` is not supported, use `len(..) == 0` instead")
|
||||
return nil
|
||||
}
|
||||
c.emitEquality(n.X, n.Op)
|
||||
default:
|
||||
c.convertToken(n.Op)
|
||||
}
|
||||
|
@ -964,13 +975,26 @@ func (c *codegen) getLabelOffset(typ labelOffsetType, name string) uint16 {
|
|||
return c.labels[labelWithType{name: name, typ: typ}]
|
||||
}
|
||||
|
||||
func (c *codegen) getEqualityOpcode(expr ast.Expr) opcode.Opcode {
|
||||
func (c *codegen) emitEquality(expr ast.Expr, op token.Token) {
|
||||
t, ok := c.typeInfo.Types[expr].Type.Underlying().(*types.Basic)
|
||||
if ok && t.Info()&types.IsNumeric != 0 {
|
||||
return opcode.NUMEQUAL
|
||||
isNum := ok && t.Info()&types.IsNumeric != 0
|
||||
switch op {
|
||||
case token.EQL:
|
||||
if isNum {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NUMEQUAL)
|
||||
} else {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.EQUAL)
|
||||
}
|
||||
case token.NEQ:
|
||||
if isNum {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NUMNOTEQUAL)
|
||||
} else {
|
||||
emit.Opcode(c.prog.BinWriter, opcode.EQUAL)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NOT)
|
||||
}
|
||||
default:
|
||||
panic("invalid token in emitEqual()")
|
||||
}
|
||||
|
||||
return opcode.EQUAL
|
||||
}
|
||||
|
||||
// getByteArray returns byte array value from constant expr.
|
||||
|
@ -1059,6 +1083,19 @@ func (c *codegen) convertBuiltin(expr *ast.CallExpr) {
|
|||
} else {
|
||||
c.prog.Err = errors.New("panic should have string or nil argument")
|
||||
}
|
||||
case "delete", "Remove":
|
||||
arg := expr.Args[0]
|
||||
errNotSupported := errors.New("only maps and non-byte slices are supported in `Remove`")
|
||||
switch typ := c.typeInfo.Types[arg].Type.Underlying().(type) {
|
||||
case *types.Map:
|
||||
case *types.Slice:
|
||||
if isByte(typ.Elem()) {
|
||||
c.prog.Err = errNotSupported
|
||||
}
|
||||
default:
|
||||
c.prog.Err = errNotSupported
|
||||
}
|
||||
emit.Opcode(c.prog.BinWriter, opcode.REMOVE)
|
||||
case "SHA256":
|
||||
emit.Opcode(c.prog.BinWriter, opcode.SHA256)
|
||||
case "SHA1":
|
||||
|
@ -1069,14 +1106,23 @@ func (c *codegen) convertBuiltin(expr *ast.CallExpr) {
|
|||
emit.Opcode(c.prog.BinWriter, opcode.HASH160)
|
||||
case "VerifySignature":
|
||||
emit.Opcode(c.prog.BinWriter, opcode.VERIFY)
|
||||
case "AppCall":
|
||||
numArgs := len(expr.Args) - 1
|
||||
case "AppCall", "DynAppCall":
|
||||
numArgs := len(expr.Args)
|
||||
if name == "AppCall" {
|
||||
numArgs--
|
||||
}
|
||||
c.emitReverse(numArgs)
|
||||
|
||||
emit.Opcode(c.prog.BinWriter, opcode.APPCALL)
|
||||
buf := c.getByteArray(expr.Args[0])
|
||||
if len(buf) != 20 {
|
||||
c.prog.Err = errors.New("invalid script hash")
|
||||
var buf []byte
|
||||
if name == "AppCall" {
|
||||
buf = c.getByteArray(expr.Args[0])
|
||||
if len(buf) != 20 {
|
||||
c.prog.Err = errors.New("invalid script hash")
|
||||
}
|
||||
} else {
|
||||
// Zeroes for DynAppCall.
|
||||
buf = make([]byte, 20)
|
||||
}
|
||||
|
||||
c.prog.WriteBytes(buf)
|
||||
|
@ -1192,11 +1238,32 @@ func (c *codegen) convertStruct(lit *ast.CompositeLit) {
|
|||
|
||||
emit.Opcode(c.prog.BinWriter, opcode.DUP)
|
||||
emit.Int(c.prog.BinWriter, int64(i))
|
||||
c.emitLoadConst(typeAndVal)
|
||||
c.emitDefault(typeAndVal.Type)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.SETITEM)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *codegen) emitDefault(typ types.Type) {
|
||||
switch t := c.scTypeFromGo(typ); t {
|
||||
case "Integer":
|
||||
emit.Int(c.prog.BinWriter, 0)
|
||||
case "Boolean":
|
||||
emit.Bool(c.prog.BinWriter, false)
|
||||
case "String":
|
||||
emit.String(c.prog.BinWriter, "")
|
||||
case "Map":
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NEWMAP)
|
||||
case "Struct":
|
||||
emit.Int(c.prog.BinWriter, int64(typ.(*types.Struct).NumFields()))
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NEWSTRUCT)
|
||||
case "Array":
|
||||
emit.Int(c.prog.BinWriter, 0)
|
||||
emit.Opcode(c.prog.BinWriter, opcode.NEWARRAY)
|
||||
case "ByteArray":
|
||||
emit.Bytes(c.prog.BinWriter, []byte{})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *codegen) convertToken(tok token.Token) {
|
||||
switch tok {
|
||||
case token.ADD_ASSIGN:
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"golang.org/x/tools/go/loader"
|
||||
)
|
||||
|
||||
|
@ -26,6 +27,12 @@ type Options struct {
|
|||
|
||||
// The name of the output for debug info.
|
||||
DebugInfo string
|
||||
|
||||
// The name of the output for application binary interface info.
|
||||
ABIInfo string
|
||||
|
||||
// Contract metadata.
|
||||
ContractDetails *smartcontract.ContractDetails
|
||||
}
|
||||
|
||||
type buildInfo struct {
|
||||
|
@ -105,5 +112,16 @@ func CompileAndSave(src string, o *Options) ([]byte, error) {
|
|||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
return b, ioutil.WriteFile(o.DebugInfo, data, os.ModePerm)
|
||||
if err := ioutil.WriteFile(o.DebugInfo, data, os.ModePerm); err != nil {
|
||||
return b, err
|
||||
}
|
||||
if o.ABIInfo == "" {
|
||||
return b, err
|
||||
}
|
||||
abi := di.convertToABI(b, o.ContractDetails)
|
||||
abiData, err := json.Marshal(abi)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
return b, ioutil.WriteFile(o.ABIInfo, abiData, os.ModePerm)
|
||||
}
|
||||
|
|
|
@ -8,6 +8,10 @@ import (
|
|||
"go/types"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// DebugInfo represents smart-contract debug information.
|
||||
|
@ -72,8 +76,42 @@ type DebugRange struct {
|
|||
|
||||
// DebugParam represents variables's name and type.
|
||||
type DebugParam struct {
|
||||
Name string
|
||||
Type string
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// ABI represents ABI contract info in compatible with NEO Blockchain Toolkit format
|
||||
type ABI struct {
|
||||
Hash util.Uint160 `json:"hash"`
|
||||
Metadata Metadata `json:"metadata"`
|
||||
EntryPoint string `json:"entrypoint"`
|
||||
Functions []Method `json:"functions"`
|
||||
Events []Event `json:"events"`
|
||||
}
|
||||
|
||||
// Metadata represents ABI contract metadata
|
||||
type Metadata struct {
|
||||
Author string `json:"author"`
|
||||
Email string `json:"email"`
|
||||
Version string `json:"version"`
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
HasStorage bool `json:"has-storage"`
|
||||
HasDynamicInvocation bool `json:"has-dynamic-invoke"`
|
||||
IsPayable bool `json:"is-payable"`
|
||||
}
|
||||
|
||||
// Method represents ABI method's metadata.
|
||||
type Method struct {
|
||||
Name string `json:"name"`
|
||||
Parameters []DebugParam `json:"parameters"`
|
||||
ReturnType string `json:"returntype"`
|
||||
}
|
||||
|
||||
// Event represents ABI event's metadata.
|
||||
type Event struct {
|
||||
Name string `json:"name"`
|
||||
Parameters []DebugParam `json:"parameters"`
|
||||
}
|
||||
|
||||
func (c *codegen) saveSequencePoint(n ast.Node) {
|
||||
|
@ -112,7 +150,7 @@ func (c *codegen) registerDebugVariable(name string, expr ast.Expr) {
|
|||
func (c *codegen) methodInfoFromScope(name string, scope *funcScope) *MethodDebugInfo {
|
||||
ps := scope.decl.Type.Params
|
||||
params := make([]DebugParam, 0, ps.NumFields())
|
||||
for i := range params {
|
||||
for i := range ps.List {
|
||||
for j := range ps.List[i].Names {
|
||||
params = append(params, DebugParam{
|
||||
Name: ps.List[i].Names[j].Name,
|
||||
|
@ -145,7 +183,11 @@ func (c *codegen) scReturnTypeFromScope(scope *funcScope) string {
|
|||
}
|
||||
|
||||
func (c *codegen) scTypeFromExpr(typ ast.Expr) string {
|
||||
switch t := c.typeInfo.Types[typ].Type.(type) {
|
||||
return c.scTypeFromGo(c.typeInfo.Types[typ].Type)
|
||||
}
|
||||
|
||||
func (c *codegen) scTypeFromGo(typ types.Type) string {
|
||||
switch t := typ.(type) {
|
||||
case *types.Basic:
|
||||
info := t.Info()
|
||||
switch {
|
||||
|
@ -260,3 +302,40 @@ func parsePairJSON(data []byte, sep string) (string, string, error) {
|
|||
}
|
||||
return ss[0], ss[1], nil
|
||||
}
|
||||
|
||||
func (di *DebugInfo) convertToABI(contract []byte, cd *smartcontract.ContractDetails) ABI {
|
||||
methods := make([]Method, 0)
|
||||
for _, method := range di.Methods {
|
||||
if method.Name.Name == di.EntryPoint {
|
||||
methods = append(methods, Method{
|
||||
Name: method.Name.Name,
|
||||
Parameters: method.Parameters,
|
||||
ReturnType: cd.ReturnType.String(),
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
events := make([]Event, len(di.Events))
|
||||
for i, event := range di.Events {
|
||||
events[i] = Event{
|
||||
Name: event.Name,
|
||||
Parameters: event.Parameters,
|
||||
}
|
||||
}
|
||||
return ABI{
|
||||
Hash: hash.Hash160(contract),
|
||||
Metadata: Metadata{
|
||||
Author: cd.Author,
|
||||
Email: cd.Email,
|
||||
Version: cd.Version,
|
||||
Title: cd.ProjectName,
|
||||
Description: cd.Description,
|
||||
HasStorage: cd.HasStorage,
|
||||
HasDynamicInvocation: cd.HasDynamicInvocation,
|
||||
IsPayable: cd.IsPayable,
|
||||
},
|
||||
EntryPoint: di.EntryPoint,
|
||||
Functions: methods,
|
||||
Events: events,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,7 +3,9 @@ package compiler
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/testserdes"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -28,6 +30,9 @@ func methodInt(a string) int {
|
|||
}
|
||||
return 3
|
||||
}
|
||||
func methodConcat(a, b string, c string) string{
|
||||
return a + b + c
|
||||
}
|
||||
func methodString() string { return "" }
|
||||
func methodByteArray() []byte { return nil }
|
||||
func methodArray() []bool { return nil }
|
||||
|
@ -48,6 +53,7 @@ func methodStruct() struct{} { return struct{}{} }
|
|||
t.Run("return types", func(t *testing.T) {
|
||||
returnTypes := map[string]string{
|
||||
"methodInt": "Integer",
|
||||
"methodConcat": "String",
|
||||
"methodString": "String", "methodByteArray": "ByteArray",
|
||||
"methodArray": "Array", "methodStruct": "Struct",
|
||||
"Main": "Boolean",
|
||||
|
@ -70,12 +76,95 @@ func methodStruct() struct{} { return struct{}{} }
|
|||
}
|
||||
})
|
||||
|
||||
t.Run("param types", func(t *testing.T) {
|
||||
paramTypes := map[string][]DebugParam{
|
||||
"methodInt": {{
|
||||
Name: "a",
|
||||
Type: "String",
|
||||
}},
|
||||
"methodConcat": {
|
||||
{
|
||||
Name: "a",
|
||||
Type: "String",
|
||||
},
|
||||
{
|
||||
Name: "b",
|
||||
Type: "String",
|
||||
},
|
||||
{
|
||||
Name: "c",
|
||||
Type: "String",
|
||||
},
|
||||
},
|
||||
"Main": {{
|
||||
Name: "op",
|
||||
Type: "String",
|
||||
}},
|
||||
}
|
||||
for i := range d.Methods {
|
||||
v, ok := paramTypes[d.Methods[i].Name.Name]
|
||||
if ok {
|
||||
require.Equal(t, v, d.Methods[i].Parameters)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// basic check that last instruction of every method is indeed RET
|
||||
for i := range d.Methods {
|
||||
index := d.Methods[i].Range.End
|
||||
require.True(t, int(index) < len(buf))
|
||||
require.EqualValues(t, opcode.RET, buf[index])
|
||||
}
|
||||
|
||||
t.Run("convert to ABI", func(t *testing.T) {
|
||||
author := "Joe"
|
||||
email := "Joe@ex.com"
|
||||
version := "1.0"
|
||||
title := "MyProj"
|
||||
description := "Description"
|
||||
actual := d.convertToABI(buf, &smartcontract.ContractDetails{
|
||||
Author: author,
|
||||
Email: email,
|
||||
Version: version,
|
||||
ProjectName: title,
|
||||
Description: description,
|
||||
HasStorage: true,
|
||||
HasDynamicInvocation: false,
|
||||
IsPayable: false,
|
||||
ReturnType: smartcontract.BoolType,
|
||||
Parameters: []smartcontract.ParamType{
|
||||
smartcontract.StringType,
|
||||
},
|
||||
})
|
||||
expected := ABI{
|
||||
Hash: hash.Hash160(buf),
|
||||
Metadata: Metadata{
|
||||
Author: author,
|
||||
Email: email,
|
||||
Version: version,
|
||||
Title: title,
|
||||
Description: description,
|
||||
HasStorage: true,
|
||||
HasDynamicInvocation: false,
|
||||
IsPayable: false,
|
||||
},
|
||||
EntryPoint: mainIdent,
|
||||
Functions: []Method{
|
||||
{
|
||||
Name: mainIdent,
|
||||
Parameters: []DebugParam{
|
||||
{
|
||||
Name: "op",
|
||||
Type: "String",
|
||||
},
|
||||
},
|
||||
ReturnType: "Boolean",
|
||||
},
|
||||
},
|
||||
Events: []Event{},
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSequencePoints(t *testing.T) {
|
||||
|
|
|
@ -65,9 +65,11 @@ func (c *funcScope) analyzeVoidCalls(node ast.Node) bool {
|
|||
}
|
||||
}
|
||||
case *ast.ReturnStmt:
|
||||
switch n.Results[0].(type) {
|
||||
case *ast.CallExpr:
|
||||
return false
|
||||
if len(n.Results) > 0 {
|
||||
switch n.Results[0].(type) {
|
||||
case *ast.CallExpr:
|
||||
return false
|
||||
}
|
||||
}
|
||||
case *ast.BinaryExpr:
|
||||
return false
|
||||
|
@ -82,6 +84,11 @@ func (c *funcScope) stackSize() int64 {
|
|||
size := 0
|
||||
ast.Inspect(c.decl, func(n ast.Node) bool {
|
||||
switch n := n.(type) {
|
||||
case *ast.FuncType:
|
||||
num := n.Results.NumFields()
|
||||
if num != 0 && len(n.Results.List[0].Names) != 0 {
|
||||
size += num
|
||||
}
|
||||
case *ast.AssignStmt:
|
||||
if n.Tok == token.DEFINE {
|
||||
size += len(n.Rhs)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package compiler_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
@ -121,7 +122,50 @@ func TestFunctionWithVoidReturn(t *testing.T) {
|
|||
return x + y
|
||||
}
|
||||
|
||||
func getSomeInteger() { }
|
||||
func getSomeInteger() { %s }
|
||||
`
|
||||
eval(t, src, big.NewInt(6))
|
||||
t.Run("EmptyBody", func(t *testing.T) {
|
||||
src := fmt.Sprintf(src, "")
|
||||
eval(t, src, big.NewInt(6))
|
||||
})
|
||||
t.Run("SingleReturn", func(t *testing.T) {
|
||||
src := fmt.Sprintf(src, "return")
|
||||
eval(t, src, big.NewInt(6))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFunctionWithVoidReturnBranch(t *testing.T) {
|
||||
src := `
|
||||
package testcase
|
||||
func Main() int {
|
||||
x := %t
|
||||
f(x)
|
||||
return 2
|
||||
}
|
||||
|
||||
func f(x bool) {
|
||||
if x {
|
||||
return
|
||||
}
|
||||
}
|
||||
`
|
||||
t.Run("ReturnBranch", func(t *testing.T) {
|
||||
src := fmt.Sprintf(src, true)
|
||||
eval(t, src, big.NewInt(2))
|
||||
})
|
||||
t.Run("NoReturn", func(t *testing.T) {
|
||||
src := fmt.Sprintf(src, false)
|
||||
eval(t, src, big.NewInt(2))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFunctionWithMultipleArgumentNames(t *testing.T) {
|
||||
src := `package foo
|
||||
func Main() int {
|
||||
return add(1, 2)
|
||||
}
|
||||
func add(a, b int) int {
|
||||
return a + b
|
||||
}`
|
||||
eval(t, src, big.NewInt(3))
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package compiler_test
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -9,9 +10,60 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
srcTmpl := `package foo
|
||||
import "github.com/nspcc-dev/neo-go/pkg/interop/util"
|
||||
func Main() int {
|
||||
a := %s
|
||||
util.Remove(a, %d)
|
||||
return len(a) * a[%d]
|
||||
}`
|
||||
testRemove := func(item string, key, index, result int64) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, item, key, index)
|
||||
if result > 0 {
|
||||
eval(t, src, big.NewInt(result))
|
||||
return
|
||||
}
|
||||
v := vmAndCompile(t, src)
|
||||
require.Error(t, v.Run())
|
||||
}
|
||||
}
|
||||
t.Run("Map", func(t *testing.T) {
|
||||
item := "map[int]int{1: 2, 5: 7, 11: 13}"
|
||||
t.Run("RemovedKey", testRemove(item, 5, 5, -1))
|
||||
t.Run("AnotherKey", testRemove(item, 5, 11, 26))
|
||||
})
|
||||
t.Run("Slice", func(t *testing.T) {
|
||||
item := "[]int{5, 7, 11, 13}"
|
||||
t.Run("RemovedKey", testRemove(item, 2, 2, 39))
|
||||
t.Run("AnotherKey", testRemove(item, 2, 1, 21))
|
||||
t.Run("LastKey", testRemove(item, 2, 3, -1))
|
||||
})
|
||||
t.Run("Invalid", func(t *testing.T) {
|
||||
srcTmpl := `package foo
|
||||
import "github.com/nspcc-dev/neo-go/pkg/interop/util"
|
||||
func Main() int {
|
||||
util.Remove(%s, 2)
|
||||
return 1
|
||||
}`
|
||||
t.Run("BasicType", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, "1")
|
||||
_, err := compiler.Compile(strings.NewReader(src))
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("ByteSlice", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, "[]byte{1, 2}")
|
||||
_, err := compiler.Compile(strings.NewReader(src))
|
||||
require.Error(t, err)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestFromAddress(t *testing.T) {
|
||||
as1 := "Aej1fe4mUgou48Zzup5j8sPrE3973cJ5oz"
|
||||
addr1, err := address.StringToUint160(as1)
|
||||
|
@ -52,6 +104,19 @@ func TestFromAddress(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAppCall(t *testing.T) {
|
||||
const srcDynApp = `
|
||||
package foo
|
||||
import "github.com/nspcc-dev/neo-go/pkg/interop/engine"
|
||||
func Main(h []byte) []byte {
|
||||
x := []byte{1, 2}
|
||||
y := []byte{3, 4}
|
||||
result := engine.DynAppCall(h, x, y)
|
||||
return result.([]byte)
|
||||
}
|
||||
`
|
||||
|
||||
var hasDynamicInvoke bool
|
||||
|
||||
srcInner := `
|
||||
package foo
|
||||
func Main(a []byte, b []byte) []byte {
|
||||
|
@ -62,14 +127,31 @@ func TestAppCall(t *testing.T) {
|
|||
inner, err := compiler.Compile(strings.NewReader(srcInner))
|
||||
require.NoError(t, err)
|
||||
|
||||
dynapp, err := compiler.Compile(strings.NewReader(srcDynApp))
|
||||
require.NoError(t, err)
|
||||
|
||||
ih := hash.Hash160(inner)
|
||||
dh := hash.Hash160(dynapp)
|
||||
getScript := func(u util.Uint160) ([]byte, bool) {
|
||||
if u.Equals(ih) {
|
||||
return inner, true
|
||||
}
|
||||
if u.Equals(dh) {
|
||||
return dynapp, hasDynamicInvoke
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
dynEntryScript := `
|
||||
package foo
|
||||
import "github.com/nspcc-dev/neo-go/pkg/interop/engine"
|
||||
func Main(h []byte) interface{} {
|
||||
return engine.AppCall(` + fmt.Sprintf("%#v", dh.BytesBE()) + `, h)
|
||||
}
|
||||
`
|
||||
dynentry, err := compiler.Compile(strings.NewReader(dynEntryScript))
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("valid script", func(t *testing.T) {
|
||||
src := getAppCallScript(fmt.Sprintf("%#v", ih.BytesBE()))
|
||||
v := vmAndCompile(t, src)
|
||||
|
@ -118,6 +200,38 @@ func TestAppCall(t *testing.T) {
|
|||
|
||||
assertResult(t, v, []byte{1, 2, 3, 4})
|
||||
})
|
||||
|
||||
t.Run("dynamic", func(t *testing.T) {
|
||||
t.Run("valid script", func(t *testing.T) {
|
||||
hasDynamicInvoke = true
|
||||
v := vm.New()
|
||||
v.Load(dynentry)
|
||||
v.SetScriptGetter(getScript)
|
||||
v.Estack().PushVal(ih.BytesBE())
|
||||
|
||||
require.NoError(t, v.Run())
|
||||
|
||||
assertResult(t, v, []byte{1, 2, 3, 4})
|
||||
})
|
||||
t.Run("invalid script", func(t *testing.T) {
|
||||
hasDynamicInvoke = true
|
||||
v := vm.New()
|
||||
v.Load(dynentry)
|
||||
v.SetScriptGetter(getScript)
|
||||
v.Estack().PushVal([]byte{1})
|
||||
|
||||
require.Error(t, v.Run())
|
||||
})
|
||||
t.Run("no dynamic invoke", func(t *testing.T) {
|
||||
hasDynamicInvoke = false
|
||||
v := vm.New()
|
||||
v.Load(dynentry)
|
||||
v.SetScriptGetter(getScript)
|
||||
v.Estack().PushVal(ih.BytesBE())
|
||||
|
||||
require.Error(t, v.Run())
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func getAppCallScript(h string) string {
|
||||
|
|
|
@ -1,8 +1,12 @@
|
|||
package compiler_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMultipleReturn1(t *testing.T) {
|
||||
|
@ -84,3 +88,30 @@ func TestSingleReturn(t *testing.T) {
|
|||
`
|
||||
eval(t, src, big.NewInt(9))
|
||||
}
|
||||
|
||||
func TestNamedReturn(t *testing.T) {
|
||||
src := `package foo
|
||||
func Main() (a int, b int) {
|
||||
a = 1
|
||||
b = 2
|
||||
c := 3
|
||||
_ = c
|
||||
return %s
|
||||
}`
|
||||
|
||||
runCase := func(ret string, result ...interface{}) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
src := fmt.Sprintf(src, ret)
|
||||
v := vmAndCompile(t, src)
|
||||
require.NoError(t, v.Run())
|
||||
require.Equal(t, len(result), v.Estack().Len())
|
||||
for i := range result {
|
||||
assert.EqualValues(t, result[i], v.Estack().Pop().Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("NormalReturn", runCase("a, b", big.NewInt(1), big.NewInt(2)))
|
||||
t.Run("EmptyReturn", runCase("", big.NewInt(1), big.NewInt(2)))
|
||||
t.Run("AnotherVariable", runCase("b, c", big.NewInt(2), big.NewInt(3)))
|
||||
}
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
package compiler_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/compiler"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var sliceTestCases = []testCase{
|
||||
|
@ -33,6 +37,16 @@ var sliceTestCases = []testCase{
|
|||
`,
|
||||
big.NewInt(42),
|
||||
},
|
||||
{
|
||||
"increase slice element with +=",
|
||||
`package foo
|
||||
func Main() int {
|
||||
a := []int{1, 2, 3}
|
||||
a[1] += 40
|
||||
return a[1]
|
||||
}`,
|
||||
big.NewInt(42),
|
||||
},
|
||||
{
|
||||
"complex test",
|
||||
`
|
||||
|
@ -165,6 +179,31 @@ func TestSliceOperations(t *testing.T) {
|
|||
runTestCases(t, sliceTestCases)
|
||||
}
|
||||
|
||||
func TestSliceEmpty(t *testing.T) {
|
||||
srcTmpl := `package foo
|
||||
func Main() int {
|
||||
var a []int
|
||||
%s
|
||||
if %s {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}`
|
||||
t.Run("WithNil", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, "", "a == nil")
|
||||
_, err := compiler.Compile(strings.NewReader(src))
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("WithLen", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, "", "len(a) == 0")
|
||||
eval(t, src, big.NewInt(1))
|
||||
})
|
||||
t.Run("NonEmpty", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, "a = []int{1}", "len(a) == 0")
|
||||
eval(t, src, big.NewInt(2))
|
||||
})
|
||||
}
|
||||
|
||||
func TestJumps(t *testing.T) {
|
||||
src := `
|
||||
package foo
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package compiler_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
|
@ -134,6 +135,17 @@ var structTestCases = []testCase{
|
|||
}`,
|
||||
big.NewInt(14),
|
||||
},
|
||||
{
|
||||
"increase struct field with +=",
|
||||
`package foo
|
||||
type token struct { x int }
|
||||
func Main() int {
|
||||
t := token{x: 2}
|
||||
t.x += 3
|
||||
return t.x
|
||||
}`,
|
||||
big.NewInt(5),
|
||||
},
|
||||
{
|
||||
"assign a struct field to a struct field",
|
||||
`
|
||||
|
@ -327,8 +339,53 @@ var structTestCases = []testCase{
|
|||
}`,
|
||||
big.NewInt(2),
|
||||
},
|
||||
{
|
||||
"uninitialized struct fields",
|
||||
`package foo
|
||||
type Foo struct {
|
||||
i int
|
||||
m map[string]int
|
||||
b []byte
|
||||
a []int
|
||||
s struct { ii int }
|
||||
}
|
||||
func NewFoo() Foo { return Foo{} }
|
||||
func Main() int {
|
||||
foo := NewFoo()
|
||||
if foo.i != 0 { return 1 }
|
||||
if len(foo.m) != 0 { return 1 }
|
||||
if len(foo.b) != 0 { return 1 }
|
||||
if len(foo.a) != 0 { return 1 }
|
||||
s := foo.s
|
||||
if s.ii != 0 { return 1 }
|
||||
return 2
|
||||
}`,
|
||||
big.NewInt(2),
|
||||
},
|
||||
}
|
||||
|
||||
func TestStructs(t *testing.T) {
|
||||
runTestCases(t, structTestCases)
|
||||
}
|
||||
|
||||
func TestStructCompare(t *testing.T) {
|
||||
srcTmpl := `package testcase
|
||||
type T struct { f int }
|
||||
func Main() int {
|
||||
a := T{f: %d}
|
||||
b := T{f: %d}
|
||||
if a != b {
|
||||
return 2
|
||||
}
|
||||
return 1
|
||||
}`
|
||||
t.Run("Equal", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, 4, 4)
|
||||
eval(t, src, big.NewInt(1))
|
||||
})
|
||||
t.Run("NotEqual", func(t *testing.T) {
|
||||
src := fmt.Sprintf(srcTmpl, 4, 5)
|
||||
eval(t, src, big.NewInt(2))
|
||||
})
|
||||
|
||||
}
|
||||
|
|
|
@ -1,12 +1,34 @@
|
|||
package compiler
|
||||
|
||||
var syscalls = map[string]map[string]string{
|
||||
"account": {
|
||||
"GetBalance": "Neo.Account.GetBalance",
|
||||
"GetScriptHash": "Neo.Account.GetScriptHash",
|
||||
"GetVotes": "Neo.Account.GetVotes",
|
||||
"IsStandard": "Neo.Account.IsStandard",
|
||||
},
|
||||
"attribute": {
|
||||
"GetUsage": "Neo.Attribute.GetUsage",
|
||||
"GetData": "Neo.Attribute.GetData",
|
||||
},
|
||||
"crypto": {
|
||||
"Secp256k1Recover": "Neo.Cryptography.Secp256k1Recover",
|
||||
"Secp256r1Recover": "Neo.Cryptography.Secp256r1Recover",
|
||||
},
|
||||
"enumerator": {
|
||||
"Concat": "Neo.Enumerator.Concat",
|
||||
"Create": "Neo.Enumerator.Create",
|
||||
"Next": "Neo.Enumerator.Next",
|
||||
"Value": "Neo.Enumerator.Value",
|
||||
},
|
||||
"storage": {
|
||||
"GetContext": "Neo.Storage.GetContext",
|
||||
"Put": "Neo.Storage.Put",
|
||||
"Get": "Neo.Storage.Get",
|
||||
"Delete": "Neo.Storage.Delete",
|
||||
"Find": "Neo.Storage.Find",
|
||||
"ConvertContextToReadOnly": "Neo.StorageContext.AsReadOnly",
|
||||
"Delete": "Neo.Storage.Delete",
|
||||
"Find": "Neo.Storage.Find",
|
||||
"Get": "Neo.Storage.Get",
|
||||
"GetContext": "Neo.Storage.GetContext",
|
||||
"GetReadOnlyContext": "Neo.Storage.GetReadOnlyContext",
|
||||
"Put": "Neo.Storage.Put",
|
||||
},
|
||||
"runtime": {
|
||||
"GetTrigger": "Neo.Runtime.GetTrigger",
|
||||
|
@ -18,14 +40,15 @@ var syscalls = map[string]map[string]string{
|
|||
"Deserialize": "Neo.Runtime.Deserialize",
|
||||
},
|
||||
"blockchain": {
|
||||
"GetHeight": "Neo.Blockchain.GetHeight",
|
||||
"GetHeader": "Neo.Blockchain.GetHeader",
|
||||
"GetBlock": "Neo.Blockchain.GetBlock",
|
||||
"GetTransaction": "Neo.Blockchain.GetTransaction",
|
||||
"GetContract": "Neo.Blockchain.GetContract",
|
||||
"GetAccount": "Neo.Blockchain.GetAccount",
|
||||
"GetValidators": "Neo.Blockchain.GetValidators",
|
||||
"GetAsset": "Neo.Blockchain.GetAsset",
|
||||
"GetAccount": "Neo.Blockchain.GetAccount",
|
||||
"GetAsset": "Neo.Blockchain.GetAsset",
|
||||
"GetBlock": "Neo.Blockchain.GetBlock",
|
||||
"GetContract": "Neo.Blockchain.GetContract",
|
||||
"GetHeader": "Neo.Blockchain.GetHeader",
|
||||
"GetHeight": "Neo.Blockchain.GetHeight",
|
||||
"GetTransaction": "Neo.Blockchain.GetTransaction",
|
||||
"GetTransactionHeight": "Neo.Blockchain.GetTransactionHeight",
|
||||
"GetValidators": "Neo.Blockchain.GetValidators",
|
||||
},
|
||||
"header": {
|
||||
"GetIndex": "Neo.Header.GetIndex",
|
||||
|
@ -43,20 +66,26 @@ var syscalls = map[string]map[string]string{
|
|||
"GetTransaction": "Neo.Block.GetTransaction",
|
||||
},
|
||||
"transaction": {
|
||||
"GetHash": "Neo.Transaction.GetHash",
|
||||
"GetType": "Neo.Transaction.GetType",
|
||||
"GetAttributes": "Neo.Transaction.GetAttributes",
|
||||
"GetHash": "Neo.Transaction.GetHash",
|
||||
"GetInputs": "Neo.Transaction.GetInputs",
|
||||
"GetOutputs": "Neo.Transaction.GetOutputs",
|
||||
"GetReferences": "Neo.Transaction.GetReferences",
|
||||
"GetScript": "Neo.InvocationTransaction.GetScript",
|
||||
"GetType": "Neo.Transaction.GetType",
|
||||
"GetUnspentCoins": "Neo.Transaction.GetUnspentCoins",
|
||||
"GetScript": "Neo.Transaction.GetScript",
|
||||
"GetWitnesses": "Neo.Transaction.GetWitnesses",
|
||||
},
|
||||
"asset": {
|
||||
"Create": "Neo.Asset.Create",
|
||||
"GetAdmin": "Neo.Asset.GetAdmin",
|
||||
"GetAmount": "Neo.Asset.GetAmount",
|
||||
"GetAssetID": "Neo.Asset.GetAssetID",
|
||||
"GetAssetType": "Neo.Asset.GetAssetType",
|
||||
"GetAmount": "Neo.Asset.GetAmount",
|
||||
"Create": "Neo.Asset.Create",
|
||||
"GetAvailable": "Neo.Asset.GetAvailable",
|
||||
"GetIssuer": "Neo.Asset.GetIssuer",
|
||||
"GetOwner": "Neo.Asset.GetOwner",
|
||||
"GetPrecision": "Neo.Asset.GetPrecision",
|
||||
"Renew": "Neo.Asset.Renew",
|
||||
},
|
||||
"contract": {
|
||||
|
@ -83,6 +112,7 @@ var syscalls = map[string]map[string]string{
|
|||
"GetExecutingScriptHash": "System.ExecutionEngine.GetExecutingScriptHash",
|
||||
},
|
||||
"iterator": {
|
||||
"Concat": "Neo.Iterator.Concat",
|
||||
"Create": "Neo.Iterator.Create",
|
||||
"Key": "Neo.Iterator.Key",
|
||||
"Keys": "Neo.Iterator.Keys",
|
||||
|
@ -90,4 +120,7 @@ var syscalls = map[string]map[string]string{
|
|||
"Value": "Neo.Iterator.Value",
|
||||
"Values": "Neo.Iterator.Values",
|
||||
},
|
||||
"witness": {
|
||||
"GetVerificationScript": "Neo.Witness.GetVerificationScript",
|
||||
},
|
||||
}
|
||||
|
|
|
@ -5,8 +5,8 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/go-yaml/yaml"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const userAgentFormat = "/NEO-GO:%s/"
|
||||
|
|
|
@ -20,25 +20,40 @@ const (
|
|||
type (
|
||||
ProtocolConfiguration struct {
|
||||
AddressVersion byte `yaml:"AddressVersion"`
|
||||
// EnableStateRoot specifies if exchange of state roots should be enabled.
|
||||
EnableStateRoot bool `yaml:"EnableStateRoot"`
|
||||
// KeepOnlyLatestState specifies if MPT should only store latest state.
|
||||
// If true, DB size will be smaller, but older roots won't be accessible.
|
||||
// This value should remain the same for the same database.
|
||||
KeepOnlyLatestState bool `yaml:"KeepOnlyLatestState"`
|
||||
// FeePerExtraByte sets the expected per-byte fee for
|
||||
// transactions exceeding the MaxFreeTransactionSize.
|
||||
FeePerExtraByte float64 `yaml:"FeePerExtraByte"`
|
||||
// FreeGasLimit is an amount of GAS which can be spent for free.
|
||||
FreeGasLimit util.Fixed8 `yaml:"FreeGasLimit"`
|
||||
LowPriorityThreshold float64 `yaml:"LowPriorityThreshold"`
|
||||
Magic NetMode `yaml:"Magic"`
|
||||
MaxTransactionsPerBlock int `yaml:"MaxTransactionsPerBlock"`
|
||||
// It can change over time, thus it's a map of block height to the
|
||||
// respective GAS limit.
|
||||
FreeGasLimit map[uint32]util.Fixed8 `yaml:"FreeGasLimit"`
|
||||
LowPriorityThreshold float64 `yaml:"LowPriorityThreshold"`
|
||||
Magic NetMode `yaml:"Magic"`
|
||||
// Maximum number of transactions allowed to be packed into block.
|
||||
MaxTransactionsPerBlock map[uint32]int `yaml:"MaxTransactionsPerBlock"`
|
||||
// Maximum size of low priority transaction in bytes.
|
||||
MaxFreeTransactionSize int `yaml:"MaxFreeTransactionSize"`
|
||||
// Maximum number of low priority transactions accepted into block.
|
||||
MaxFreeTransactionsPerBlock int `yaml:"MaxFreeTransactionsPerBlock"`
|
||||
MemPoolSize int `yaml:"MemPoolSize"`
|
||||
MaxFreeTransactionsPerBlock map[uint32]int `yaml:"MaxFreeTransactionsPerBlock"`
|
||||
// MinimumNetworkFee sets the minimum required network fee for transaction to pass validation.
|
||||
MinimumNetworkFee util.Fixed8 `yaml:"MinimumNetworkFee"`
|
||||
MemPoolSize int `yaml:"MemPoolSize"`
|
||||
// NoBonusHeight is the height where GAS generation stops.
|
||||
NoBonusHeight uint32 `yaml:"NoBonusHeight"`
|
||||
// SaveStorageBatch enables storage batch saving before every persist.
|
||||
SaveStorageBatch bool `yaml:"SaveStorageBatch"`
|
||||
SecondsPerBlock int `yaml:"SecondsPerBlock"`
|
||||
SeedList []string `yaml:"SeedList"`
|
||||
StandbyValidators []string `yaml:"StandbyValidators"`
|
||||
SystemFee SystemFee `yaml:"SystemFee"`
|
||||
SaveStorageBatch bool `yaml:"SaveStorageBatch"`
|
||||
SecondsPerBlock int `yaml:"SecondsPerBlock"`
|
||||
SeedList []string `yaml:"SeedList"`
|
||||
StandbyValidators []string `yaml:"StandbyValidators"`
|
||||
// StateRootEnableIndex specifies starting height for state root calculations and exchange.
|
||||
StateRootEnableIndex uint32 `yaml:"StateRootEnableIndex"`
|
||||
SystemFee SystemFee `yaml:"SystemFee"`
|
||||
// Whether to verify received blocks.
|
||||
VerifyBlocks bool `yaml:"VerifyBlocks"`
|
||||
// Whether to verify transactions in received blocks.
|
||||
|
@ -57,6 +72,43 @@ type (
|
|||
NetMode uint32
|
||||
)
|
||||
|
||||
// GetFreeGas returns FreeGasLimit value for given block height.
|
||||
func (p *ProtocolConfiguration) GetFreeGas(block uint32) util.Fixed8 {
|
||||
var gas util.Fixed8
|
||||
var height uint32
|
||||
for h, g := range p.FreeGasLimit {
|
||||
if h > block || h < height {
|
||||
continue
|
||||
}
|
||||
gas = g
|
||||
height = h
|
||||
}
|
||||
return gas
|
||||
}
|
||||
|
||||
func getIntFromMap(m map[uint32]int, block uint32) int {
|
||||
var res int
|
||||
var height uint32
|
||||
for h, i := range m {
|
||||
if h > block || h < height || i < 0 {
|
||||
continue
|
||||
}
|
||||
res = i
|
||||
height = h
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// GetMaxTxPerBlock returns MaxTransactionsPerBlock value for given block height.
|
||||
func (p *ProtocolConfiguration) GetMaxTxPerBlock(block uint32) int {
|
||||
return getIntFromMap(p.MaxTransactionsPerBlock, block)
|
||||
}
|
||||
|
||||
// GetMaxFreeTxPerBlock returns MaxFreeTransactionsPerBlock value for given block height.
|
||||
func (p *ProtocolConfiguration) GetMaxFreeTxPerBlock(block uint32) int {
|
||||
return getIntFromMap(p.MaxFreeTransactionsPerBlock, block)
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (n NetMode) String() string {
|
||||
switch n {
|
||||
|
|
25
pkg/config/protocol_config_test.go
Normal file
25
pkg/config/protocol_config_test.go
Normal file
|
@ -0,0 +1,25 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetFreeGas(t *testing.T) {
|
||||
fixed10 := util.Fixed8FromInt64(10)
|
||||
fixed50 := util.Fixed8FromInt64(50)
|
||||
p := ProtocolConfiguration{
|
||||
FreeGasLimit: map[uint32]util.Fixed8{
|
||||
0: fixed10,
|
||||
6200000: fixed50,
|
||||
},
|
||||
}
|
||||
require.Equal(t, fixed10, p.GetFreeGas(0))
|
||||
require.Equal(t, fixed10, p.GetFreeGas(1000))
|
||||
require.Equal(t, fixed10, p.GetFreeGas(1000000))
|
||||
require.Equal(t, fixed10, p.GetFreeGas(6100000))
|
||||
require.Equal(t, fixed50, p.GetFreeGas(6200000))
|
||||
require.Equal(t, fixed50, p.GetFreeGas(7000000))
|
||||
}
|
|
@ -58,44 +58,23 @@ func (n *neoBlock) SetTransactions(txes []block.Transaction) {
|
|||
// Version implements block.Block interface.
|
||||
func (n *neoBlock) Version() uint32 { return n.Block.Version }
|
||||
|
||||
// SetVersion implements block.Block interface.
|
||||
func (n *neoBlock) SetVersion(v uint32) { n.Block.Version = v }
|
||||
|
||||
// PrevHash implements block.Block interface.
|
||||
func (n *neoBlock) PrevHash() util.Uint256 { return n.Block.PrevHash }
|
||||
|
||||
// SetPrevHash implements block.Block interface.
|
||||
func (n *neoBlock) SetPrevHash(h util.Uint256) { n.Block.PrevHash = h }
|
||||
|
||||
// MerkleRoot implements block.Block interface.
|
||||
func (n *neoBlock) MerkleRoot() util.Uint256 { return n.Block.MerkleRoot }
|
||||
|
||||
// SetMerkleRoot implements block.Block interface.
|
||||
func (n *neoBlock) SetMerkleRoot(r util.Uint256) { n.Block.MerkleRoot = r }
|
||||
|
||||
// Timestamp implements block.Block interface.
|
||||
func (n *neoBlock) Timestamp() uint32 { return n.Block.Timestamp }
|
||||
|
||||
// SetTimestamp implements block.Block interface.
|
||||
func (n *neoBlock) SetTimestamp(ts uint32) { n.Block.Timestamp = ts }
|
||||
func (n *neoBlock) Timestamp() uint64 { return uint64(n.Block.Timestamp) * 1000000000 }
|
||||
|
||||
// Index implements block.Block interface.
|
||||
func (n *neoBlock) Index() uint32 { return n.Block.Index }
|
||||
|
||||
// SetIndex implements block.Block interface.
|
||||
func (n *neoBlock) SetIndex(i uint32) { n.Block.Index = i }
|
||||
|
||||
// ConsensusData implements block.Block interface.
|
||||
func (n *neoBlock) ConsensusData() uint64 { return n.Block.ConsensusData }
|
||||
|
||||
// SetConsensusData implements block.Block interface.
|
||||
func (n *neoBlock) SetConsensusData(nonce uint64) { n.Block.ConsensusData = nonce }
|
||||
|
||||
// NextConsensus implements block.Block interface.
|
||||
func (n *neoBlock) NextConsensus() util.Uint160 { return n.Block.NextConsensus }
|
||||
|
||||
// SetNextConsensus implements block.Block interface.
|
||||
func (n *neoBlock) SetNextConsensus(h util.Uint160) { n.Block.NextConsensus = h }
|
||||
|
||||
// Signature implements block.Block interface.
|
||||
func (n *neoBlock) Signature() []byte { return n.signature }
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/nspcc-dev/dbft/block"
|
||||
"github.com/nspcc-dev/dbft/crypto"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -21,28 +22,29 @@ func TestNeoBlock_Sign(t *testing.T) {
|
|||
func TestNeoBlock_Setters(t *testing.T) {
|
||||
b := new(neoBlock)
|
||||
|
||||
b.SetVersion(1)
|
||||
b.Block.Version = 1
|
||||
require.EqualValues(t, 1, b.Version())
|
||||
|
||||
b.SetIndex(12)
|
||||
b.Block.Index = 12
|
||||
require.EqualValues(t, 12, b.Index())
|
||||
|
||||
b.SetTimestamp(777)
|
||||
require.EqualValues(t, 777, b.Timestamp())
|
||||
b.Block.Timestamp = 777
|
||||
require.EqualValues(t, 777*1000000000, b.Timestamp()) // Nanoseconds.
|
||||
|
||||
b.SetConsensusData(456)
|
||||
b.Block.ConsensusData = 456
|
||||
require.EqualValues(t, 456, b.ConsensusData())
|
||||
|
||||
b.SetMerkleRoot(util.Uint256{1, 2, 3, 4})
|
||||
b.Block.MerkleRoot = util.Uint256{1, 2, 3, 4}
|
||||
require.Equal(t, util.Uint256{1, 2, 3, 4}, b.MerkleRoot())
|
||||
|
||||
b.SetNextConsensus(util.Uint160{9, 2})
|
||||
b.Block.NextConsensus = util.Uint160{9, 2}
|
||||
require.Equal(t, util.Uint160{9, 2}, b.NextConsensus())
|
||||
|
||||
b.SetPrevHash(util.Uint256{9, 8, 7})
|
||||
b.Block.PrevHash = util.Uint256{9, 8, 7}
|
||||
require.Equal(t, util.Uint256{9, 8, 7}, b.PrevHash())
|
||||
|
||||
txx := []block.Transaction{newMinerTx(123)}
|
||||
b.SetTransactions(txx)
|
||||
tx := newMinerTx(123)
|
||||
txx := []block.Transaction{tx}
|
||||
b.Block.Transactions = []*transaction.Transaction{tx}
|
||||
require.Equal(t, txx, b.Transactions())
|
||||
}
|
||||
|
|
|
@ -30,7 +30,13 @@ func (c changeView) NewViewNumber() byte { return c.newViewNumber }
|
|||
func (c *changeView) SetNewViewNumber(view byte) { c.newViewNumber = view }
|
||||
|
||||
// Timestamp implements payload.ChangeView interface.
|
||||
func (c changeView) Timestamp() uint32 { return c.timestamp }
|
||||
func (c changeView) Timestamp() uint64 { return uint64(c.timestamp) * nanoInSec }
|
||||
|
||||
// SetTimestamp implements payload.ChangeView interface.
|
||||
func (c *changeView) SetTimestamp(ts uint32) { c.timestamp = ts }
|
||||
func (c *changeView) SetTimestamp(ts uint64) { c.timestamp = uint32(ts / nanoInSec) }
|
||||
|
||||
// Reason implements payload.ChangeView interface.
|
||||
func (c changeView) Reason() payload.ChangeViewReason { return payload.CVUnknown }
|
||||
|
||||
// SetReason implements payload.ChangeView interface.
|
||||
func (c *changeView) SetReason(_ payload.ChangeViewReason) {}
|
||||
|
|
|
@ -9,8 +9,8 @@ import (
|
|||
func TestChangeView_Setters(t *testing.T) {
|
||||
var c changeView
|
||||
|
||||
c.SetTimestamp(123)
|
||||
require.EqualValues(t, 123, c.Timestamp())
|
||||
c.SetTimestamp(123 * nanoInSec)
|
||||
require.EqualValues(t, 123*nanoInSec, c.Timestamp())
|
||||
|
||||
c.SetNewViewNumber(2)
|
||||
require.EqualValues(t, 2, c.NewViewNumber())
|
||||
|
|
|
@ -2,6 +2,7 @@ package consensus
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"time"
|
||||
|
@ -9,16 +10,20 @@ import (
|
|||
"github.com/nspcc-dev/dbft"
|
||||
"github.com/nspcc-dev/dbft/block"
|
||||
"github.com/nspcc-dev/dbft/crypto"
|
||||
"github.com/nspcc-dev/dbft/merkle"
|
||||
"github.com/nspcc-dev/dbft/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core"
|
||||
coreb "github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/cache"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/mempool"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -41,9 +46,6 @@ type Service interface {
|
|||
OnTransaction(tx *transaction.Transaction)
|
||||
// GetPayload returns Payload with specified hash if it is present in the local cache.
|
||||
GetPayload(h util.Uint256) *Payload
|
||||
// OnNewBlock notifies consensus service that there is a new block in
|
||||
// the chain (without explicitly passing it to the service).
|
||||
OnNewBlock()
|
||||
}
|
||||
|
||||
type service struct {
|
||||
|
@ -51,9 +53,9 @@ type service struct {
|
|||
|
||||
log *zap.Logger
|
||||
// cache is a fifo cache which stores recent payloads.
|
||||
cache *relayCache
|
||||
cache *cache.HashCache
|
||||
// txx is a fifo cache which stores miner transactions.
|
||||
txx *relayCache
|
||||
txx *cache.HashCache
|
||||
dbft *dbft.DBFT
|
||||
// messages and transactions are channels needed to process
|
||||
// everything in single thread.
|
||||
|
@ -61,9 +63,12 @@ type service struct {
|
|||
transactions chan *transaction.Transaction
|
||||
// blockEvents is used to pass a new block event to the consensus
|
||||
// process.
|
||||
blockEvents chan struct{}
|
||||
blockEvents chan *coreb.Block
|
||||
lastProposal []util.Uint256
|
||||
wallet *wallet.Wallet
|
||||
// started is a flag set with Start method that runs an event handling
|
||||
// goroutine.
|
||||
started *atomic.Bool
|
||||
}
|
||||
|
||||
// Config is a configuration for consensus services.
|
||||
|
@ -72,10 +77,7 @@ type Config struct {
|
|||
Logger *zap.Logger
|
||||
// Broadcast is a callback which is called to notify server
|
||||
// about new consensus payload to sent.
|
||||
Broadcast func(p *Payload)
|
||||
// RelayBlock is a callback that is called to notify server
|
||||
// about the new block that needs to be broadcasted.
|
||||
RelayBlock func(b *coreb.Block)
|
||||
Broadcast func(cache.Hashable)
|
||||
// Chain is a core.Blockchainer instance.
|
||||
Chain core.Blockchainer
|
||||
// RequestTx is a callback to which will be called
|
||||
|
@ -101,12 +103,13 @@ func NewService(cfg Config) (Service, error) {
|
|||
Config: cfg,
|
||||
|
||||
log: cfg.Logger,
|
||||
cache: newFIFOCache(cacheMaxCapacity),
|
||||
txx: newFIFOCache(cacheMaxCapacity),
|
||||
cache: cache.NewFIFOCache(cacheMaxCapacity),
|
||||
txx: cache.NewFIFOCache(cacheMaxCapacity),
|
||||
messages: make(chan Payload, 100),
|
||||
|
||||
transactions: make(chan *transaction.Transaction, 100),
|
||||
blockEvents: make(chan struct{}, 1),
|
||||
blockEvents: make(chan *coreb.Block, 1),
|
||||
started: atomic.NewBool(false),
|
||||
}
|
||||
|
||||
if cfg.Wallet == nil {
|
||||
|
@ -119,13 +122,25 @@ func NewService(cfg Config) (Service, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Check that wallet password is correct for at least one account.
|
||||
var ok bool
|
||||
for _, acc := range srv.wallet.Accounts {
|
||||
err := acc.Decrypt(srv.Config.Wallet.Password)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return nil, errors.New("no account with provided password was found")
|
||||
}
|
||||
|
||||
defer srv.wallet.Close()
|
||||
|
||||
srv.dbft = dbft.New(
|
||||
dbft.WithLogger(srv.log),
|
||||
dbft.WithSecondsPerBlock(cfg.TimePerBlock),
|
||||
dbft.WithGetKeyPair(srv.getKeyPair),
|
||||
dbft.WithTxPerBlock(10000),
|
||||
dbft.WithRequestTx(cfg.RequestTx),
|
||||
dbft.WithGetTx(srv.getTx),
|
||||
dbft.WithGetVerified(srv.getVerifiedTx),
|
||||
|
@ -134,19 +149,21 @@ func NewService(cfg Config) (Service, error) {
|
|||
dbft.WithVerifyBlock(srv.verifyBlock),
|
||||
dbft.WithGetBlock(srv.getBlock),
|
||||
dbft.WithWatchOnly(func() bool { return false }),
|
||||
dbft.WithNewBlock(func() block.Block { return new(neoBlock) }),
|
||||
dbft.WithNewBlockFromContext(srv.newBlockFromContext),
|
||||
dbft.WithCurrentHeight(cfg.Chain.BlockHeight),
|
||||
dbft.WithCurrentBlockHash(cfg.Chain.CurrentBlockHash),
|
||||
dbft.WithGetValidators(srv.getValidators),
|
||||
dbft.WithGetConsensusAddress(srv.getConsensusAddress),
|
||||
|
||||
dbft.WithNewConsensusPayload(func() payload.ConsensusPayload { return new(Payload) }),
|
||||
dbft.WithNewPrepareRequest(func() payload.PrepareRequest { return new(prepareRequest) }),
|
||||
dbft.WithNewPrepareResponse(func() payload.PrepareResponse { return new(prepareResponse) }),
|
||||
dbft.WithNewConsensusPayload(srv.newPayload),
|
||||
dbft.WithNewPrepareRequest(srv.newPrepareRequest),
|
||||
dbft.WithNewPrepareResponse(srv.newPrepareResponse),
|
||||
dbft.WithNewChangeView(func() payload.ChangeView { return new(changeView) }),
|
||||
dbft.WithNewCommit(func() payload.Commit { return new(commit) }),
|
||||
dbft.WithNewCommit(srv.newCommit),
|
||||
dbft.WithNewRecoveryRequest(func() payload.RecoveryRequest { return new(recoveryRequest) }),
|
||||
dbft.WithNewRecoveryMessage(func() payload.RecoveryMessage { return new(recoveryMessage) }),
|
||||
dbft.WithNewRecoveryMessage(srv.newRecoveryMessage),
|
||||
dbft.WithVerifyPrepareRequest(srv.verifyRequest),
|
||||
dbft.WithVerifyPrepareResponse(srv.verifyResponse),
|
||||
)
|
||||
|
||||
if srv.dbft == nil {
|
||||
|
@ -162,15 +179,18 @@ var (
|
|||
)
|
||||
|
||||
func (s *service) Start() {
|
||||
s.dbft.Start()
|
||||
|
||||
go s.eventLoop()
|
||||
if s.started.CAS(false, true) {
|
||||
s.dbft.Start()
|
||||
s.Chain.SubscribeForBlocks(s.blockEvents)
|
||||
go s.eventLoop()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *service) eventLoop() {
|
||||
for {
|
||||
select {
|
||||
case hv := <-s.dbft.Timer.C():
|
||||
case <-s.dbft.Timer.C():
|
||||
hv := s.dbft.Timer.HV()
|
||||
s.log.Debug("timer fired",
|
||||
zap.Uint32("height", hv.Height),
|
||||
zap.Uint("view", uint(hv.View)))
|
||||
|
@ -203,15 +223,102 @@ func (s *service) eventLoop() {
|
|||
s.dbft.OnReceive(&msg)
|
||||
case tx := <-s.transactions:
|
||||
s.dbft.OnTransaction(tx)
|
||||
case <-s.blockEvents:
|
||||
s.log.Debug("new block in the chain",
|
||||
zap.Uint32("dbft index", s.dbft.BlockIndex),
|
||||
zap.Uint32("chain index", s.Chain.BlockHeight()))
|
||||
s.dbft.InitializeConsensus(0)
|
||||
case b := <-s.blockEvents:
|
||||
// We also receive our own blocks here, so check for index.
|
||||
if b.Index >= s.dbft.BlockIndex {
|
||||
s.log.Debug("new block in the chain",
|
||||
zap.Uint32("dbft index", s.dbft.BlockIndex),
|
||||
zap.Uint32("chain index", s.Chain.BlockHeight()))
|
||||
s.lastProposal = nil
|
||||
s.dbft.InitializeConsensus(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *service) newPayload() payload.ConsensusPayload {
|
||||
return &Payload{
|
||||
message: &message{
|
||||
stateRootEnabled: s.stateRootEnabled(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// stateRootEnabled checks if state root feature is enabled on current height.
|
||||
// It should be called only from dbft callbacks and is not protected by any mutex.
|
||||
func (s *service) stateRootEnabled() bool {
|
||||
return s.Chain.GetConfig().EnableStateRoot
|
||||
}
|
||||
|
||||
func (s *service) newPrepareRequest() payload.PrepareRequest {
|
||||
res := &prepareRequest{
|
||||
stateRootEnabled: s.stateRootEnabled(),
|
||||
}
|
||||
if !s.stateRootEnabled() {
|
||||
return res
|
||||
}
|
||||
sig := s.getStateRootSig()
|
||||
if sig != nil {
|
||||
copy(res.stateRootSig[:], sig)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *service) getStateRootSig() []byte {
|
||||
var sig []byte
|
||||
|
||||
sr, err := s.Chain.GetStateRoot(s.dbft.BlockIndex - 1)
|
||||
if err == nil {
|
||||
data := sr.GetSignedPart()
|
||||
sig, _ = s.dbft.Priv.Sign(data)
|
||||
}
|
||||
return sig
|
||||
}
|
||||
|
||||
func (s *service) newCommit() payload.Commit {
|
||||
if s.stateRootEnabled() && s.dbft.Context.BlockIndex > s.Chain.GetConfig().StateRootEnableIndex {
|
||||
// This is being called when we're ready to commit, so we can safely
|
||||
// relay stateroot here.
|
||||
stateRoot, err := s.Chain.GetStateRoot(s.dbft.Context.BlockIndex - 1)
|
||||
if err != nil {
|
||||
s.log.Warn("can't get stateroot", zap.Uint32("block", s.dbft.Context.BlockIndex-1))
|
||||
}
|
||||
r := stateRoot.MPTRoot
|
||||
r.Witness = s.getWitness(func(ctx dbft.Context, i int) []byte {
|
||||
if p := ctx.PreparationPayloads[i]; p != nil && p.ViewNumber() == ctx.ViewNumber {
|
||||
if int(ctx.PrimaryIndex) == i {
|
||||
return p.GetPrepareRequest().(*prepareRequest).stateRootSig[:]
|
||||
}
|
||||
return p.GetPrepareResponse().(*prepareResponse).stateRootSig[:]
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := s.Chain.AddStateRoot(&r); err != nil {
|
||||
s.log.Warn("errors while adding state root", zap.Error(err))
|
||||
}
|
||||
s.Broadcast(&r)
|
||||
}
|
||||
return new(commit)
|
||||
}
|
||||
|
||||
func (s *service) newPrepareResponse() payload.PrepareResponse {
|
||||
res := &prepareResponse{
|
||||
stateRootEnabled: s.stateRootEnabled(),
|
||||
}
|
||||
if !s.stateRootEnabled() {
|
||||
return res
|
||||
}
|
||||
sig := s.getStateRootSig()
|
||||
if sig != nil {
|
||||
copy(res.stateRootSig[:], sig)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *service) newRecoveryMessage() payload.RecoveryMessage {
|
||||
return &recoveryMessage{stateRootEnabled: s.stateRootEnabled()}
|
||||
}
|
||||
|
||||
func (s *service) validatePayload(p *Payload) bool {
|
||||
validators := s.getValidators()
|
||||
if int(p.validatorIndex) >= len(validators) {
|
||||
|
@ -234,7 +341,8 @@ func (s *service) getKeyPair(pubs []crypto.PublicKey) (int, crypto.PrivateKey, c
|
|||
|
||||
key, err := keys.NEP2Decrypt(acc.EncryptedWIF, s.Config.Wallet.Password)
|
||||
if err != nil {
|
||||
continue
|
||||
s.log.Fatal("can't unlock account", zap.String("address", address.Uint160ToString(sh)))
|
||||
break
|
||||
}
|
||||
|
||||
return i, &privateKey{PrivateKey: key}, &publicKey{PublicKey: key.PublicKey()}
|
||||
|
@ -245,7 +353,7 @@ func (s *service) getKeyPair(pubs []crypto.PublicKey) (int, crypto.PrivateKey, c
|
|||
|
||||
// OnPayload handles Payload receive.
|
||||
func (s *service) OnPayload(cp *Payload) {
|
||||
log := s.log.With(zap.Stringer("hash", cp.Hash()), zap.Stringer("type", cp.Type()))
|
||||
log := s.log.With(zap.Stringer("hash", cp.Hash()))
|
||||
if !s.validatePayload(cp) {
|
||||
log.Debug("can't validate payload")
|
||||
return
|
||||
|
@ -257,17 +365,17 @@ func (s *service) OnPayload(cp *Payload) {
|
|||
s.Config.Broadcast(cp)
|
||||
s.cache.Add(cp)
|
||||
|
||||
if s.dbft == nil {
|
||||
log.Debug("dbft is nil")
|
||||
if s.dbft == nil || !s.started.Load() {
|
||||
log.Debug("dbft is inactive or not started yet")
|
||||
return
|
||||
}
|
||||
|
||||
// we use switch here because other payloads could be possibly added in future
|
||||
switch cp.Type() {
|
||||
case payload.PrepareRequestType:
|
||||
req := cp.GetPrepareRequest().(*prepareRequest)
|
||||
s.txx.Add(&req.minerTx)
|
||||
s.lastProposal = req.transactionHashes
|
||||
// decode payload data into message
|
||||
if cp.message == nil {
|
||||
if err := cp.decodeData(s.stateRootEnabled()); err != nil {
|
||||
log.Debug("can't decode payload data", zap.Error(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.messages <- *cp
|
||||
|
@ -279,20 +387,6 @@ func (s *service) OnTransaction(tx *transaction.Transaction) {
|
|||
}
|
||||
}
|
||||
|
||||
// OnNewBlock notifies consensus process that there is a new block in the chain
|
||||
// and dbft should probably be reinitialized.
|
||||
func (s *service) OnNewBlock() {
|
||||
if s.dbft != nil {
|
||||
// If there is something in the queue already, the second
|
||||
// consecutive event doesn't make much sense (reinitializing
|
||||
// dbft twice doesn't improve it in any way).
|
||||
select {
|
||||
case s.blockEvents <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetPayload returns payload stored in cache.
|
||||
func (s *service) GetPayload(h util.Uint256) *Payload {
|
||||
p := s.cache.Get(h)
|
||||
|
@ -348,6 +442,46 @@ func (s *service) verifyBlock(b block.Block) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (s *service) verifyStateRootSig(index int, sig []byte) error {
|
||||
r, err := s.Chain.GetStateRoot(s.dbft.BlockIndex - 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get local state root: %v", err)
|
||||
}
|
||||
validators := s.getValidators()
|
||||
if index >= len(validators) {
|
||||
return errors.New("bad validator index")
|
||||
}
|
||||
|
||||
pub := validators[index]
|
||||
if pub.Verify(r.GetSignedPart(), sig) != nil {
|
||||
return errors.New("bad state root signature")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) verifyRequest(p payload.ConsensusPayload) error {
|
||||
req := p.GetPrepareRequest().(*prepareRequest)
|
||||
if s.stateRootEnabled() {
|
||||
err := s.verifyStateRootSig(int(p.ValidatorIndex()), req.stateRootSig[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Save lastProposal for getVerified().
|
||||
s.txx.Add(&req.minerTx)
|
||||
s.lastProposal = req.transactionHashes
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) verifyResponse(p payload.ConsensusPayload) error {
|
||||
if !s.stateRootEnabled() {
|
||||
return nil
|
||||
}
|
||||
resp := p.GetPrepareResponse().(*prepareResponse)
|
||||
return s.verifyStateRootSig(int(p.ValidatorIndex()), resp.stateRootSig[:])
|
||||
}
|
||||
|
||||
func (s *service) processBlock(b block.Block) {
|
||||
bb := &b.(*neoBlock).Block
|
||||
bb.Script = *(s.getBlockWitness(bb))
|
||||
|
@ -358,19 +492,28 @@ func (s *service) processBlock(b block.Block) {
|
|||
if _, errget := s.Chain.GetBlock(bb.Hash()); errget != nil {
|
||||
s.log.Warn("error on add block", zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
s.Config.RelayBlock(bb)
|
||||
}
|
||||
s.lastProposal = nil
|
||||
}
|
||||
|
||||
func (s *service) getBlockWitness(b *coreb.Block) *transaction.Witness {
|
||||
func (s *service) getBlockWitness(_ *coreb.Block) *transaction.Witness {
|
||||
return s.getWitness(func(ctx dbft.Context, i int) []byte {
|
||||
if p := ctx.CommitPayloads[i]; p != nil && p.ViewNumber() == ctx.ViewNumber {
|
||||
return p.GetCommit().Signature()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *service) getWitness(f func(dbft.Context, int) []byte) *transaction.Witness {
|
||||
dctx := s.dbft.Context
|
||||
pubs := convertKeys(dctx.Validators)
|
||||
sigs := make(map[*keys.PublicKey][]byte)
|
||||
|
||||
for i := range pubs {
|
||||
if p := dctx.CommitPayloads[i]; p != nil && p.ViewNumber() == dctx.ViewNumber {
|
||||
sigs[pubs[i]] = p.GetCommit().Signature()
|
||||
sig := f(dctx, i)
|
||||
if sig != nil {
|
||||
sigs[pubs[i]] = sig
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -407,12 +550,12 @@ func (s *service) getBlock(h util.Uint256) block.Block {
|
|||
return &neoBlock{Block: *b}
|
||||
}
|
||||
|
||||
func (s *service) getVerifiedTx(count int) []block.Transaction {
|
||||
func (s *service) getVerifiedTx() []block.Transaction {
|
||||
pool := s.Config.Chain.GetMemPool()
|
||||
|
||||
var txx []mempool.TxWithFee
|
||||
|
||||
if s.dbft.ViewNumber > 0 {
|
||||
if s.dbft.ViewNumber > 0 && len(s.lastProposal) > 0 {
|
||||
txx = make([]mempool.TxWithFee, 0, len(s.lastProposal))
|
||||
for i := range s.lastProposal {
|
||||
if tx, fee, ok := pool.TryGetValue(s.lastProposal[i]); ok {
|
||||
|
@ -521,3 +664,21 @@ func convertKeys(validators []crypto.PublicKey) (pubs []*keys.PublicKey) {
|
|||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *service) newBlockFromContext(ctx *dbft.Context) block.Block {
|
||||
block := new(neoBlock)
|
||||
if len(ctx.TransactionHashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
block.Block.Timestamp = uint32(ctx.Timestamp / 1000000000)
|
||||
block.Block.Index = ctx.BlockIndex
|
||||
block.Block.NextConsensus = ctx.NextConsensus
|
||||
block.Block.PrevHash = ctx.PrevHash
|
||||
block.Block.Version = ctx.Version
|
||||
block.Block.ConsensusData = ctx.Nonce
|
||||
|
||||
mt := merkle.NewMerkleTree(ctx.TransactionHashes...)
|
||||
block.Block.MerkleRoot = mt.Root().Hash
|
||||
return block
|
||||
}
|
||||
|
|
|
@ -2,11 +2,13 @@ package consensus
|
|||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/dbft/block"
|
||||
"github.com/nspcc-dev/dbft/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/config"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/cache"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -25,7 +27,7 @@ func TestNewService(t *testing.T) {
|
|||
require.NoError(t, srv.Chain.PoolTx(tx))
|
||||
|
||||
var txx []block.Transaction
|
||||
require.NotPanics(t, func() { txx = srv.getVerifiedTx(1) })
|
||||
require.NotPanics(t, func() { txx = srv.getVerifiedTx() })
|
||||
require.Len(t, txx, 2)
|
||||
require.Equal(t, tx, txx[1])
|
||||
srv.Chain.Close()
|
||||
|
@ -33,6 +35,8 @@ func TestNewService(t *testing.T) {
|
|||
|
||||
func TestService_GetVerified(t *testing.T) {
|
||||
srv := newTestService(t)
|
||||
srv.dbft.Start()
|
||||
|
||||
txs := []*transaction.Transaction{
|
||||
newMinerTx(1),
|
||||
newMinerTx(2),
|
||||
|
@ -43,21 +47,36 @@ func TestService_GetVerified(t *testing.T) {
|
|||
|
||||
hashes := []util.Uint256{txs[0].Hash(), txs[1].Hash(), txs[2].Hash()}
|
||||
|
||||
p := new(Payload)
|
||||
p.SetType(payload.PrepareRequestType)
|
||||
p.SetPayload(&prepareRequest{transactionHashes: hashes, minerTx: *newMinerTx(999)})
|
||||
p.SetValidatorIndex(1)
|
||||
// Everyone sends a message.
|
||||
for i := 0; i < 4; i++ {
|
||||
p := srv.newPayload().(*Payload)
|
||||
p.SetHeight(1)
|
||||
p.SetValidatorIndex(uint16(i))
|
||||
priv, _ := getTestValidator(i)
|
||||
// To properly sign stateroot in prepare request.
|
||||
srv.dbft.Priv = priv
|
||||
// One PrepareRequest and three ChangeViews.
|
||||
if i == 1 {
|
||||
p.SetType(payload.PrepareRequestType)
|
||||
preq := srv.newPrepareRequest().(*prepareRequest)
|
||||
preq.transactionHashes = hashes
|
||||
preq.minerTx = *newMinerTx(999)
|
||||
p.SetPayload(preq)
|
||||
} else {
|
||||
p.SetType(payload.ChangeViewType)
|
||||
p.SetPayload(&changeView{newViewNumber: 1, timestamp: uint32(time.Now().Unix())})
|
||||
}
|
||||
|
||||
priv, _ := getTestValidator(1)
|
||||
require.NoError(t, p.Sign(priv))
|
||||
require.NoError(t, p.Sign(priv))
|
||||
|
||||
srv.OnPayload(p)
|
||||
// Skip srv.OnPayload, because the service is not really started.
|
||||
srv.dbft.OnReceive(p)
|
||||
}
|
||||
require.Equal(t, uint8(1), srv.dbft.ViewNumber)
|
||||
require.Equal(t, hashes, srv.lastProposal)
|
||||
|
||||
srv.dbft.ViewNumber = 1
|
||||
|
||||
t.Run("new transactions will be proposed in case of failure", func(t *testing.T) {
|
||||
txx := srv.getVerifiedTx(10)
|
||||
txx := srv.getVerifiedTx()
|
||||
require.Equal(t, 2, len(txx), "there is only 1 tx in mempool")
|
||||
require.Equal(t, txs[3], txx[1])
|
||||
})
|
||||
|
@ -67,7 +86,7 @@ func TestService_GetVerified(t *testing.T) {
|
|||
require.NoError(t, srv.Chain.PoolTx(tx))
|
||||
}
|
||||
|
||||
txx := srv.getVerifiedTx(10)
|
||||
txx := srv.getVerifiedTx()
|
||||
require.Contains(t, txx, txs[0])
|
||||
require.Contains(t, txx, txs[1])
|
||||
require.NotContains(t, txx, txs[2])
|
||||
|
@ -79,6 +98,7 @@ func TestService_ValidatePayload(t *testing.T) {
|
|||
srv := newTestService(t)
|
||||
priv, _ := getTestValidator(1)
|
||||
p := new(Payload)
|
||||
p.message = &message{}
|
||||
|
||||
p.SetPayload(&prepareRequest{})
|
||||
|
||||
|
@ -138,9 +158,14 @@ func TestService_getTx(t *testing.T) {
|
|||
|
||||
func TestService_OnPayload(t *testing.T) {
|
||||
srv := newTestService(t)
|
||||
// This test directly reads things from srv.messages that normally
|
||||
// is read by internal goroutine started with Start(). So let's
|
||||
// pretend we really did start already.
|
||||
srv.started.Store(true)
|
||||
|
||||
priv, _ := getTestValidator(1)
|
||||
p := new(Payload)
|
||||
p.message = &message{}
|
||||
p.SetValidatorIndex(1)
|
||||
p.SetPayload(&prepareRequest{})
|
||||
|
||||
|
@ -179,7 +204,7 @@ func shouldNotReceive(t *testing.T, ch chan Payload) {
|
|||
func newTestService(t *testing.T) *service {
|
||||
srv, err := NewService(Config{
|
||||
Logger: zaptest.NewLogger(t),
|
||||
Broadcast: func(*Payload) {},
|
||||
Broadcast: func(cache.Hashable) {},
|
||||
Chain: newTestChain(t),
|
||||
RequestTx: func(...util.Uint256) {},
|
||||
Wallet: &wallet.Config{
|
||||
|
|
|
@ -22,13 +22,16 @@ type (
|
|||
Type messageType
|
||||
ViewNumber byte
|
||||
|
||||
stateRootEnabled bool
|
||||
|
||||
payload io.Serializable
|
||||
}
|
||||
|
||||
// Payload is a type for consensus-related messages.
|
||||
Payload struct {
|
||||
message
|
||||
*message
|
||||
|
||||
data []byte
|
||||
version uint32
|
||||
validatorIndex uint16
|
||||
prevHash util.Uint256
|
||||
|
@ -46,6 +49,8 @@ const (
|
|||
commitType messageType = 0x30
|
||||
recoveryRequestType messageType = 0x40
|
||||
recoveryMessageType messageType = 0x41
|
||||
|
||||
nanoInSec = 1000_000_000
|
||||
)
|
||||
|
||||
// ViewNumber implements payload.ConsensusPayload interface.
|
||||
|
@ -168,9 +173,12 @@ func (p *Payload) EncodeBinaryUnsigned(w *io.BinWriter) {
|
|||
w.WriteU16LE(p.validatorIndex)
|
||||
w.WriteU32LE(p.timestamp)
|
||||
|
||||
ww := io.NewBufBinWriter()
|
||||
p.message.EncodeBinary(ww.BinWriter)
|
||||
w.WriteVarBytes(ww.Bytes())
|
||||
if p.message != nil {
|
||||
ww := io.NewBufBinWriter()
|
||||
p.message.EncodeBinary(ww.BinWriter)
|
||||
p.data = ww.Bytes()
|
||||
}
|
||||
w.WriteVarBytes(p.data)
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
|
@ -227,14 +235,10 @@ func (p *Payload) DecodeBinaryUnsigned(r *io.BinReader) {
|
|||
p.validatorIndex = r.ReadU16LE()
|
||||
p.timestamp = r.ReadU32LE()
|
||||
|
||||
data := r.ReadVarBytes()
|
||||
p.data = r.ReadVarBytes()
|
||||
if r.Err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
rr := io.NewBinReaderFromBuf(data)
|
||||
p.message.DecodeBinary(rr)
|
||||
r.Err = rr.Err
|
||||
}
|
||||
|
||||
// Hash implements payload.ConsensusPayload interface.
|
||||
|
@ -283,15 +287,21 @@ func (m *message) DecodeBinary(r *io.BinReader) {
|
|||
cv.newViewNumber = m.ViewNumber + 1
|
||||
m.payload = cv
|
||||
case prepareRequestType:
|
||||
m.payload = new(prepareRequest)
|
||||
m.payload = &prepareRequest{
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
}
|
||||
case prepareResponseType:
|
||||
m.payload = new(prepareResponse)
|
||||
m.payload = &prepareResponse{
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
}
|
||||
case commitType:
|
||||
m.payload = new(commit)
|
||||
case recoveryRequestType:
|
||||
m.payload = new(recoveryRequest)
|
||||
case recoveryMessageType:
|
||||
m.payload = new(recoveryMessage)
|
||||
m.payload = &recoveryMessage{
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
}
|
||||
default:
|
||||
r.Err = errors.Errorf("invalid type: 0x%02x", byte(m.Type))
|
||||
return
|
||||
|
@ -318,3 +328,15 @@ func (t messageType) String() string {
|
|||
return fmt.Sprintf("UNKNOWN(0x%02x)", byte(t))
|
||||
}
|
||||
}
|
||||
|
||||
// decodeData decodes data of payload into it's message.
|
||||
func (p *Payload) decodeData(stateRootEnabled bool) error {
|
||||
m := &message{stateRootEnabled: stateRootEnabled}
|
||||
br := io.NewBinReaderFromBuf(p.data)
|
||||
m.DecodeBinary(br)
|
||||
if br.Err != nil {
|
||||
return errors.Wrap(br.Err, "cannot decode data into message")
|
||||
}
|
||||
p.message = m
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ var messageTypes = []messageType{
|
|||
|
||||
func TestConsensusPayload_Setters(t *testing.T) {
|
||||
var p Payload
|
||||
p.message = &message{}
|
||||
|
||||
p.SetVersion(1)
|
||||
assert.EqualValues(t, 1, p.Version())
|
||||
|
@ -86,11 +87,20 @@ func TestConsensusPayload_Hash(t *testing.T) {
|
|||
func TestConsensusPayload_Serializable(t *testing.T) {
|
||||
for _, mt := range messageTypes {
|
||||
p := randomPayload(t, mt)
|
||||
testserdes.EncodeDecodeBinary(t, p, new(Payload))
|
||||
actual := new(Payload)
|
||||
data, err := testserdes.EncodeBinary(p)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, testserdes.DecodeBinary(data, actual))
|
||||
// message is nil after decoding as we didn't yet call decodeData
|
||||
require.Nil(t, actual.message)
|
||||
// message should now be decoded from actual.data byte array
|
||||
assert.NoError(t, actual.decodeData(false))
|
||||
require.Equal(t, p, actual)
|
||||
|
||||
data := p.MarshalUnsigned()
|
||||
data = p.MarshalUnsigned()
|
||||
pu := new(Payload)
|
||||
require.NoError(t, pu.UnmarshalUnsigned(data))
|
||||
assert.NoError(t, pu.decodeData(false))
|
||||
|
||||
p.Witness = transaction.Witness{}
|
||||
require.Equal(t, p, pu)
|
||||
|
@ -115,7 +125,7 @@ func TestConsensusPayload_DecodeBinaryInvalid(t *testing.T) {
|
|||
buf := make([]byte, 46+1+34+1+2)
|
||||
|
||||
expected := &Payload{
|
||||
message: message{
|
||||
message: &message{
|
||||
Type: prepareResponseType,
|
||||
payload: &prepareResponse{},
|
||||
},
|
||||
|
@ -124,6 +134,8 @@ func TestConsensusPayload_DecodeBinaryInvalid(t *testing.T) {
|
|||
VerificationScript: []byte{},
|
||||
},
|
||||
}
|
||||
// fill `data` for next check
|
||||
_ = expected.Hash()
|
||||
|
||||
// valid payload
|
||||
buf[delimeterIndex] = 1
|
||||
|
@ -131,11 +143,15 @@ func TestConsensusPayload_DecodeBinaryInvalid(t *testing.T) {
|
|||
buf[typeIndex] = byte(prepareResponseType)
|
||||
p := new(Payload)
|
||||
require.NoError(t, testserdes.DecodeBinary(buf, p))
|
||||
// decode `data` into `message`
|
||||
assert.NoError(t, p.decodeData(false))
|
||||
require.Equal(t, expected, p)
|
||||
|
||||
// invalid type
|
||||
buf[typeIndex] = 0xFF
|
||||
require.Error(t, testserdes.DecodeBinary(buf, new(Payload)))
|
||||
actual := new(Payload)
|
||||
require.NoError(t, testserdes.DecodeBinary(buf, actual))
|
||||
require.Error(t, actual.decodeData(false))
|
||||
|
||||
// invalid format
|
||||
buf[delimeterIndex] = 0
|
||||
|
@ -149,19 +165,26 @@ func TestConsensusPayload_DecodeBinaryInvalid(t *testing.T) {
|
|||
require.Error(t, testserdes.DecodeBinary(buf, new(Payload)))
|
||||
}
|
||||
|
||||
func testEncodeDecode(srEnabled bool, mt messageType, actual io.Serializable) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
expected := randomMessage(t, mt, srEnabled)
|
||||
testserdes.EncodeDecodeBinary(t, expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommit_Serializable(t *testing.T) {
|
||||
c := randomMessage(t, commitType)
|
||||
testserdes.EncodeDecodeBinary(t, c, new(commit))
|
||||
testEncodeDecode(false, commitType, &commit{})
|
||||
}
|
||||
|
||||
func TestPrepareResponse_Serializable(t *testing.T) {
|
||||
resp := randomMessage(t, prepareResponseType)
|
||||
testserdes.EncodeDecodeBinary(t, resp, new(prepareResponse))
|
||||
t.Run("WithStateRoot", testEncodeDecode(true, prepareResponseType, &prepareResponse{stateRootEnabled: true}))
|
||||
t.Run("NoStateRoot", testEncodeDecode(false, prepareResponseType, &prepareResponse{stateRootEnabled: false}))
|
||||
|
||||
}
|
||||
|
||||
func TestPrepareRequest_Serializable(t *testing.T) {
|
||||
req := randomMessage(t, prepareRequestType)
|
||||
testserdes.EncodeDecodeBinary(t, req, new(prepareRequest))
|
||||
t.Run("WithStateRoot", testEncodeDecode(true, prepareRequestType, &prepareRequest{stateRootEnabled: true}))
|
||||
t.Run("NoStateRoot", testEncodeDecode(false, prepareRequestType, &prepareRequest{stateRootEnabled: false}))
|
||||
}
|
||||
|
||||
func TestRecoveryRequest_Serializable(t *testing.T) {
|
||||
|
@ -170,13 +193,13 @@ func TestRecoveryRequest_Serializable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRecoveryMessage_Serializable(t *testing.T) {
|
||||
msg := randomMessage(t, recoveryMessageType)
|
||||
testserdes.EncodeDecodeBinary(t, msg, new(recoveryMessage))
|
||||
t.Run("WithStateRoot", testEncodeDecode(true, recoveryMessageType, &recoveryMessage{stateRootEnabled: true}))
|
||||
t.Run("NoStateRoot", testEncodeDecode(false, recoveryMessageType, &recoveryMessage{stateRootEnabled: false}))
|
||||
}
|
||||
|
||||
func randomPayload(t *testing.T, mt messageType) *Payload {
|
||||
p := &Payload{
|
||||
message: message{
|
||||
message: &message{
|
||||
Type: mt,
|
||||
ViewNumber: byte(rand.Uint32()),
|
||||
payload: randomMessage(t, mt),
|
||||
|
@ -199,16 +222,21 @@ func randomPayload(t *testing.T, mt messageType) *Payload {
|
|||
return p
|
||||
}
|
||||
|
||||
func randomMessage(t *testing.T, mt messageType) io.Serializable {
|
||||
func randomMessage(t *testing.T, mt messageType, srEnabled ...bool) io.Serializable {
|
||||
switch mt {
|
||||
case changeViewType:
|
||||
return &changeView{
|
||||
timestamp: rand.Uint32(),
|
||||
}
|
||||
case prepareRequestType:
|
||||
return randomPrepareRequest(t)
|
||||
return randomPrepareRequest(t, srEnabled...)
|
||||
case prepareResponseType:
|
||||
return &prepareResponse{preparationHash: random.Uint256()}
|
||||
var p = prepareResponse{preparationHash: random.Uint256()}
|
||||
if len(srEnabled) > 0 && srEnabled[0] {
|
||||
p.stateRootEnabled = true
|
||||
random.Fill(p.stateRootSig[:])
|
||||
}
|
||||
return &p
|
||||
case commitType:
|
||||
var c commit
|
||||
random.Fill(c.signature[:])
|
||||
|
@ -216,14 +244,14 @@ func randomMessage(t *testing.T, mt messageType) io.Serializable {
|
|||
case recoveryRequestType:
|
||||
return &recoveryRequest{timestamp: rand.Uint32()}
|
||||
case recoveryMessageType:
|
||||
return randomRecoveryMessage(t)
|
||||
return randomRecoveryMessage(t, srEnabled...)
|
||||
default:
|
||||
require.Fail(t, "invalid type")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func randomPrepareRequest(t *testing.T) *prepareRequest {
|
||||
func randomPrepareRequest(t *testing.T, srEnabled ...bool) *prepareRequest {
|
||||
const txCount = 3
|
||||
|
||||
req := &prepareRequest{
|
||||
|
@ -239,15 +267,20 @@ func randomPrepareRequest(t *testing.T) *prepareRequest {
|
|||
}
|
||||
req.nextConsensus = random.Uint160()
|
||||
|
||||
if len(srEnabled) > 0 && srEnabled[0] {
|
||||
req.stateRootEnabled = true
|
||||
random.Fill(req.stateRootSig[:])
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func randomRecoveryMessage(t *testing.T) *recoveryMessage {
|
||||
result := randomMessage(t, prepareRequestType)
|
||||
func randomRecoveryMessage(t *testing.T, srEnabled ...bool) *recoveryMessage {
|
||||
result := randomMessage(t, prepareRequestType, srEnabled...)
|
||||
require.IsType(t, (*prepareRequest)(nil), result)
|
||||
prepReq := result.(*prepareRequest)
|
||||
|
||||
return &recoveryMessage{
|
||||
rec := &recoveryMessage{
|
||||
preparationPayloads: []*preparationCompact{
|
||||
{
|
||||
ValidatorIndex: 1,
|
||||
|
@ -281,6 +314,16 @@ func randomRecoveryMessage(t *testing.T) *recoveryMessage {
|
|||
payload: prepReq,
|
||||
},
|
||||
}
|
||||
if len(srEnabled) > 0 && srEnabled[0] {
|
||||
rec.stateRootEnabled = true
|
||||
rec.prepareRequest.stateRootEnabled = true
|
||||
random.Fill(prepReq.stateRootSig[:])
|
||||
for _, p := range rec.preparationPayloads {
|
||||
p.stateRootEnabled = true
|
||||
random.Fill(p.StateRootSig[:])
|
||||
}
|
||||
}
|
||||
return rec
|
||||
}
|
||||
|
||||
func TestPayload_Sign(t *testing.T) {
|
||||
|
|
|
@ -14,6 +14,9 @@ type prepareRequest struct {
|
|||
transactionHashes []util.Uint256
|
||||
minerTx transaction.Transaction
|
||||
nextConsensus util.Uint160
|
||||
stateRootSig [signatureSize]byte
|
||||
|
||||
stateRootEnabled bool
|
||||
}
|
||||
|
||||
var _ payload.PrepareRequest = (*prepareRequest)(nil)
|
||||
|
@ -25,6 +28,9 @@ func (p *prepareRequest) EncodeBinary(w *io.BinWriter) {
|
|||
w.WriteBytes(p.nextConsensus[:])
|
||||
w.WriteArray(p.transactionHashes)
|
||||
p.minerTx.EncodeBinary(w)
|
||||
if p.stateRootEnabled {
|
||||
w.WriteBytes(p.stateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
|
@ -34,13 +40,16 @@ func (p *prepareRequest) DecodeBinary(r *io.BinReader) {
|
|||
r.ReadBytes(p.nextConsensus[:])
|
||||
r.ReadArray(&p.transactionHashes)
|
||||
p.minerTx.DecodeBinary(r)
|
||||
if p.stateRootEnabled {
|
||||
r.ReadBytes(p.stateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Timestamp implements payload.PrepareRequest interface.
|
||||
func (p *prepareRequest) Timestamp() uint32 { return p.timestamp }
|
||||
func (p *prepareRequest) Timestamp() uint64 { return uint64(p.timestamp) * 1000000000 }
|
||||
|
||||
// SetTimestamp implements payload.PrepareRequest interface.
|
||||
func (p *prepareRequest) SetTimestamp(ts uint32) { p.timestamp = ts }
|
||||
func (p *prepareRequest) SetTimestamp(ts uint64) { p.timestamp = uint32(ts / 1000000000) }
|
||||
|
||||
// Nonce implements payload.PrepareRequest interface.
|
||||
func (p *prepareRequest) Nonce() uint64 { return p.nonce }
|
||||
|
|
|
@ -11,8 +11,8 @@ import (
|
|||
func TestPrepareRequest_Setters(t *testing.T) {
|
||||
var p prepareRequest
|
||||
|
||||
p.SetTimestamp(123)
|
||||
require.EqualValues(t, 123, p.Timestamp())
|
||||
p.SetTimestamp(123 * 1000000000) // Nanoseconds.
|
||||
require.EqualValues(t, 123*1000000000, p.Timestamp())
|
||||
|
||||
p.SetNextConsensus(util.Uint160{5, 6, 7})
|
||||
require.Equal(t, util.Uint160{5, 6, 7}, p.NextConsensus())
|
||||
|
|
|
@ -9,6 +9,9 @@ import (
|
|||
// prepareResponse represents dBFT PrepareResponse message.
|
||||
type prepareResponse struct {
|
||||
preparationHash util.Uint256
|
||||
stateRootSig [signatureSize]byte
|
||||
|
||||
stateRootEnabled bool
|
||||
}
|
||||
|
||||
var _ payload.PrepareResponse = (*prepareResponse)(nil)
|
||||
|
@ -16,11 +19,17 @@ var _ payload.PrepareResponse = (*prepareResponse)(nil)
|
|||
// EncodeBinary implements io.Serializable interface.
|
||||
func (p *prepareResponse) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteBytes(p.preparationHash[:])
|
||||
if p.stateRootEnabled {
|
||||
w.WriteBytes(p.stateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
func (p *prepareResponse) DecodeBinary(r *io.BinReader) {
|
||||
r.ReadBytes(p.preparationHash[:])
|
||||
if p.stateRootEnabled {
|
||||
r.ReadBytes(p.stateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// PreparationHash implements payload.PrepareResponse interface.
|
||||
|
|
|
@ -3,6 +3,7 @@ package consensus
|
|||
import (
|
||||
"github.com/nspcc-dev/dbft/crypto"
|
||||
"github.com/nspcc-dev/dbft/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -16,6 +17,8 @@ type (
|
|||
commitPayloads []*commitCompact
|
||||
changeViewPayloads []*changeViewCompact
|
||||
prepareRequest *message
|
||||
|
||||
stateRootEnabled bool
|
||||
}
|
||||
|
||||
changeViewCompact struct {
|
||||
|
@ -35,6 +38,9 @@ type (
|
|||
preparationCompact struct {
|
||||
ValidatorIndex uint16
|
||||
InvocationScript []byte
|
||||
StateRootSig [signatureSize]byte
|
||||
|
||||
stateRootEnabled bool
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -46,7 +52,7 @@ func (m *recoveryMessage) DecodeBinary(r *io.BinReader) {
|
|||
|
||||
var hasReq = r.ReadBool()
|
||||
if hasReq {
|
||||
m.prepareRequest = new(message)
|
||||
m.prepareRequest = &message{stateRootEnabled: m.stateRootEnabled}
|
||||
m.prepareRequest.DecodeBinary(r)
|
||||
if r.Err == nil && m.prepareRequest.Type != prepareRequestType {
|
||||
r.Err = errors.New("recovery message PrepareRequest has wrong type")
|
||||
|
@ -66,7 +72,16 @@ func (m *recoveryMessage) DecodeBinary(r *io.BinReader) {
|
|||
}
|
||||
}
|
||||
|
||||
r.ReadArray(&m.preparationPayloads)
|
||||
lu := r.ReadVarUint()
|
||||
if lu > state.MaxValidatorsVoted {
|
||||
r.Err = errors.New("too many preparation payloads")
|
||||
return
|
||||
}
|
||||
m.preparationPayloads = make([]*preparationCompact, lu)
|
||||
for i := uint64(0); i < lu; i++ {
|
||||
m.preparationPayloads[i] = &preparationCompact{stateRootEnabled: m.stateRootEnabled}
|
||||
m.preparationPayloads[i].DecodeBinary(r)
|
||||
}
|
||||
r.ReadArray(&m.commitPayloads)
|
||||
}
|
||||
|
||||
|
@ -96,7 +111,7 @@ func (p *changeViewCompact) DecodeBinary(r *io.BinReader) {
|
|||
p.ValidatorIndex = r.ReadU16LE()
|
||||
p.OriginalViewNumber = r.ReadB()
|
||||
p.Timestamp = r.ReadU32LE()
|
||||
p.InvocationScript = r.ReadVarBytes()
|
||||
p.InvocationScript = r.ReadVarBytes(1024)
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
|
@ -112,7 +127,7 @@ func (p *commitCompact) DecodeBinary(r *io.BinReader) {
|
|||
p.ViewNumber = r.ReadB()
|
||||
p.ValidatorIndex = r.ReadU16LE()
|
||||
r.ReadBytes(p.Signature[:])
|
||||
p.InvocationScript = r.ReadVarBytes()
|
||||
p.InvocationScript = r.ReadVarBytes(1024)
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
|
@ -126,13 +141,19 @@ func (p *commitCompact) EncodeBinary(w *io.BinWriter) {
|
|||
// DecodeBinary implements io.Serializable interface.
|
||||
func (p *preparationCompact) DecodeBinary(r *io.BinReader) {
|
||||
p.ValidatorIndex = r.ReadU16LE()
|
||||
p.InvocationScript = r.ReadVarBytes()
|
||||
p.InvocationScript = r.ReadVarBytes(1024)
|
||||
if p.stateRootEnabled {
|
||||
r.ReadBytes(p.StateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
func (p *preparationCompact) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteU16LE(p.ValidatorIndex)
|
||||
w.WriteVarBytes(p.InvocationScript)
|
||||
if p.stateRootEnabled {
|
||||
w.WriteBytes(p.StateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
// AddPayload implements payload.RecoveryMessage interface.
|
||||
|
@ -143,17 +164,23 @@ func (m *recoveryMessage) AddPayload(p payload.ConsensusPayload) {
|
|||
Type: prepareRequestType,
|
||||
ViewNumber: p.ViewNumber(),
|
||||
payload: p.GetPrepareRequest().(*prepareRequest),
|
||||
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
}
|
||||
h := p.Hash()
|
||||
m.preparationHash = &h
|
||||
m.preparationPayloads = append(m.preparationPayloads, &preparationCompact{
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
ValidatorIndex: p.ValidatorIndex(),
|
||||
InvocationScript: p.(*Payload).Witness.InvocationScript,
|
||||
StateRootSig: p.GetPrepareRequest().(*prepareRequest).stateRootSig,
|
||||
})
|
||||
case payload.PrepareResponseType:
|
||||
m.preparationPayloads = append(m.preparationPayloads, &preparationCompact{
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
ValidatorIndex: p.ValidatorIndex(),
|
||||
InvocationScript: p.(*Payload).Witness.InvocationScript,
|
||||
StateRootSig: p.GetPrepareResponse().(*prepareResponse).stateRootSig,
|
||||
})
|
||||
|
||||
if m.preparationHash == nil {
|
||||
|
@ -164,7 +191,7 @@ func (m *recoveryMessage) AddPayload(p payload.ConsensusPayload) {
|
|||
m.changeViewPayloads = append(m.changeViewPayloads, &changeViewCompact{
|
||||
ValidatorIndex: p.ValidatorIndex(),
|
||||
OriginalViewNumber: p.ViewNumber(),
|
||||
Timestamp: p.GetChangeView().Timestamp(),
|
||||
Timestamp: p.GetChangeView().(*changeView).timestamp,
|
||||
InvocationScript: p.(*Payload).Witness.InvocationScript,
|
||||
})
|
||||
case payload.CommitType:
|
||||
|
@ -214,6 +241,9 @@ func (m *recoveryMessage) GetPrepareResponses(p payload.ConsensusPayload, valida
|
|||
for i, resp := range m.preparationPayloads {
|
||||
r := fromPayload(prepareResponseType, p.(*Payload), &prepareResponse{
|
||||
preparationHash: *m.preparationHash,
|
||||
stateRootSig: resp.StateRootSig,
|
||||
|
||||
stateRootEnabled: m.stateRootEnabled,
|
||||
})
|
||||
r.SetValidatorIndex(resp.ValidatorIndex)
|
||||
r.Witness.InvocationScript = resp.InvocationScript
|
||||
|
@ -234,6 +264,7 @@ func (m *recoveryMessage) GetChangeViews(p payload.ConsensusPayload, validators
|
|||
newViewNumber: cv.OriginalViewNumber + 1,
|
||||
timestamp: cv.Timestamp,
|
||||
})
|
||||
c.message.ViewNumber = cv.OriginalViewNumber
|
||||
c.SetValidatorIndex(cv.ValidatorIndex)
|
||||
c.Witness.InvocationScript = cv.InvocationScript
|
||||
c.Witness.VerificationScript = getVerificationScript(cv.ValidatorIndex, validators)
|
||||
|
@ -249,7 +280,9 @@ func (m *recoveryMessage) GetCommits(p payload.ConsensusPayload, validators []cr
|
|||
ps := make([]payload.ConsensusPayload, len(m.commitPayloads))
|
||||
|
||||
for i, c := range m.commitPayloads {
|
||||
cc := fromPayload(commitType, p.(*Payload), &commit{signature: c.Signature})
|
||||
cc := fromPayload(commitType, p.(*Payload), &commit{
|
||||
signature: c.Signature,
|
||||
})
|
||||
cc.SetValidatorIndex(c.ValidatorIndex)
|
||||
cc.Witness.InvocationScript = c.InvocationScript
|
||||
cc.Witness.VerificationScript = getVerificationScript(c.ValidatorIndex, validators)
|
||||
|
@ -285,10 +318,12 @@ func getVerificationScript(i uint16, validators []crypto.PublicKey) []byte {
|
|||
|
||||
func fromPayload(t messageType, recovery *Payload, p io.Serializable) *Payload {
|
||||
return &Payload{
|
||||
message: message{
|
||||
message: &message{
|
||||
Type: t,
|
||||
ViewNumber: recovery.message.ViewNumber,
|
||||
payload: p,
|
||||
|
||||
stateRootEnabled: recovery.stateRootEnabled,
|
||||
},
|
||||
version: recovery.Version(),
|
||||
prevHash: recovery.PrevHash(),
|
||||
|
|
|
@ -24,6 +24,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
|
||||
r := &recoveryMessage{}
|
||||
p := new(Payload)
|
||||
p.message = &message{}
|
||||
p.SetType(payload.RecoveryMessageType)
|
||||
p.SetPayload(r)
|
||||
// sign payload to have verification script
|
||||
|
@ -37,6 +38,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
nextConsensus: util.Uint160{1, 2},
|
||||
}
|
||||
p1 := new(Payload)
|
||||
p1.message = &message{}
|
||||
p1.SetType(payload.PrepareRequestType)
|
||||
p1.SetPayload(req)
|
||||
p1.SetValidatorIndex(0)
|
||||
|
@ -44,6 +46,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
|
||||
t.Run("prepare response is added", func(t *testing.T) {
|
||||
p2 := new(Payload)
|
||||
p2.message = &message{}
|
||||
p2.SetType(payload.PrepareResponseType)
|
||||
p2.SetPayload(&prepareResponse{
|
||||
preparationHash: p1.Hash(),
|
||||
|
@ -69,6 +72,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
r.AddPayload(p1)
|
||||
pr = r.GetPrepareRequest(p, pubs, p1.ValidatorIndex())
|
||||
require.NotNil(t, pr)
|
||||
require.Equal(t, p1.Hash(), pr.Hash())
|
||||
require.Equal(t, p1, pr)
|
||||
|
||||
pl := pr.(*Payload)
|
||||
|
@ -77,6 +81,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
|
||||
t.Run("change view is added", func(t *testing.T) {
|
||||
p3 := new(Payload)
|
||||
p3.message = &message{}
|
||||
p3.SetType(payload.ChangeViewType)
|
||||
p3.SetPayload(&changeView{
|
||||
newViewNumber: 1,
|
||||
|
@ -97,6 +102,7 @@ func TestRecoveryMessage_Setters(t *testing.T) {
|
|||
|
||||
t.Run("commit is added", func(t *testing.T) {
|
||||
p4 := new(Payload)
|
||||
p4.message = &message{}
|
||||
p4.SetType(payload.CommitType)
|
||||
p4.SetPayload(randomMessage(t, commitType))
|
||||
p4.SetValidatorIndex(4)
|
||||
|
|
|
@ -23,7 +23,7 @@ func (m *recoveryRequest) EncodeBinary(w *io.BinWriter) {
|
|||
}
|
||||
|
||||
// Timestamp implements payload.RecoveryRequest interface.
|
||||
func (m *recoveryRequest) Timestamp() uint32 { return m.timestamp }
|
||||
func (m *recoveryRequest) Timestamp() uint64 { return uint64(m.timestamp) * nanoInSec }
|
||||
|
||||
// SetTimestamp implements payload.RecoveryRequest interface.
|
||||
func (m *recoveryRequest) SetTimestamp(ts uint32) { m.timestamp = ts }
|
||||
func (m *recoveryRequest) SetTimestamp(ts uint64) { m.timestamp = uint32(ts / nanoInSec) }
|
||||
|
|
|
@ -9,6 +9,6 @@ import (
|
|||
func TestRecoveryRequest_Setters(t *testing.T) {
|
||||
var r recoveryRequest
|
||||
|
||||
r.SetTimestamp(123)
|
||||
require.EqualValues(t, 123, r.Timestamp())
|
||||
r.SetTimestamp(123 * nanoInSec)
|
||||
require.EqualValues(t, 123*nanoInSec, r.Timestamp())
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package block
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
|
@ -17,10 +18,15 @@ type Block struct {
|
|||
Base
|
||||
|
||||
// Transaction list.
|
||||
Transactions []*transaction.Transaction `json:"tx"`
|
||||
Transactions []*transaction.Transaction
|
||||
|
||||
// True if this block is created from trimmed data.
|
||||
Trimmed bool `json:"-"`
|
||||
Trimmed bool
|
||||
}
|
||||
|
||||
// auxTxes is used for JSON i/o.
|
||||
type auxTxes struct {
|
||||
Transactions []*transaction.Transaction `json:"tx"`
|
||||
}
|
||||
|
||||
// Header returns the Header of the Block.
|
||||
|
@ -149,3 +155,42 @@ func (b *Block) Compare(item queue.Item) int {
|
|||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler interface.
|
||||
func (b Block) MarshalJSON() ([]byte, error) {
|
||||
txes, err := json.Marshal(auxTxes{b.Transactions})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseBytes, err := json.Marshal(b.Base)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Stitch them together.
|
||||
if baseBytes[len(baseBytes)-1] != '}' || txes[0] != '{' {
|
||||
return nil, errors.New("can't merge internal jsons")
|
||||
}
|
||||
baseBytes[len(baseBytes)-1] = ','
|
||||
baseBytes = append(baseBytes, txes[1:]...)
|
||||
return baseBytes, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler interface.
|
||||
func (b *Block) UnmarshalJSON(data []byte) error {
|
||||
// As Base and txes are at the same level in json,
|
||||
// do unmarshalling separately for both structs.
|
||||
txes := new(auxTxes)
|
||||
err := json.Unmarshal(data, txes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
base := new(Base)
|
||||
err = json.Unmarshal(data, base)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Base = *base
|
||||
b.Transactions = txes.Transactions
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
package block
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
@ -12,33 +16,33 @@ import (
|
|||
// Base holds the base info of a block
|
||||
type Base struct {
|
||||
// Version of the block.
|
||||
Version uint32 `json:"version"`
|
||||
Version uint32
|
||||
|
||||
// hash of the previous block.
|
||||
PrevHash util.Uint256 `json:"previousblockhash"`
|
||||
PrevHash util.Uint256
|
||||
|
||||
// Root hash of a transaction list.
|
||||
MerkleRoot util.Uint256 `json:"merkleroot"`
|
||||
MerkleRoot util.Uint256
|
||||
|
||||
// The time stamp of each block must be later than previous block's time stamp.
|
||||
// Generally the difference of two block's time stamp is about 15 seconds and imprecision is allowed.
|
||||
// The height of the block must be exactly equal to the height of the previous block plus 1.
|
||||
Timestamp uint32 `json:"time"`
|
||||
Timestamp uint32
|
||||
|
||||
// index/height of the block
|
||||
Index uint32 `json:"height"`
|
||||
Index uint32
|
||||
|
||||
// Random number also called nonce
|
||||
ConsensusData uint64 `json:"nonce"`
|
||||
ConsensusData uint64
|
||||
|
||||
// Contract address of the next miner
|
||||
NextConsensus util.Uint160 `json:"next_consensus"`
|
||||
NextConsensus util.Uint160
|
||||
|
||||
// Padding that is fixed to 1
|
||||
_ uint8
|
||||
|
||||
// Script used to validate the block
|
||||
Script transaction.Witness `json:"script"`
|
||||
Script transaction.Witness
|
||||
|
||||
// Hash of this block, created when binary encoded (double SHA256).
|
||||
hash util.Uint256
|
||||
|
@ -47,6 +51,21 @@ type Base struct {
|
|||
verificationHash util.Uint256
|
||||
}
|
||||
|
||||
// baseAux is used to marshal/unmarshal to/from JSON, it's almost the same
|
||||
// as original Base, but with Nonce and NextConsensus fields differing and
|
||||
// Hash added.
|
||||
type baseAux struct {
|
||||
Hash util.Uint256 `json:"hash"`
|
||||
Version uint32 `json:"version"`
|
||||
PrevHash util.Uint256 `json:"previousblockhash"`
|
||||
MerkleRoot util.Uint256 `json:"merkleroot"`
|
||||
Timestamp uint32 `json:"time"`
|
||||
Index uint32 `json:"index"`
|
||||
Nonce string `json:"nonce"`
|
||||
NextConsensus string `json:"nextconsensus"`
|
||||
Script transaction.Witness `json:"script"`
|
||||
}
|
||||
|
||||
// Verify verifies the integrity of the Base.
|
||||
func (b *Base) Verify() bool {
|
||||
// TODO: Need a persisted blockchain for this.
|
||||
|
@ -140,3 +159,56 @@ func (b *Base) decodeHashableFields(br *io.BinReader) {
|
|||
b.createHash()
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler interface.
|
||||
func (b Base) MarshalJSON() ([]byte, error) {
|
||||
nonce := strconv.FormatUint(b.ConsensusData, 16)
|
||||
for len(nonce) < 16 {
|
||||
nonce = "0" + nonce
|
||||
}
|
||||
aux := baseAux{
|
||||
Hash: b.Hash(),
|
||||
Version: b.Version,
|
||||
PrevHash: b.PrevHash,
|
||||
MerkleRoot: b.MerkleRoot,
|
||||
Timestamp: b.Timestamp,
|
||||
Index: b.Index,
|
||||
Nonce: nonce,
|
||||
NextConsensus: address.Uint160ToString(b.NextConsensus),
|
||||
Script: b.Script,
|
||||
}
|
||||
return json.Marshal(aux)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler interface.
|
||||
func (b *Base) UnmarshalJSON(data []byte) error {
|
||||
var aux = new(baseAux)
|
||||
var nonce uint64
|
||||
var nextC util.Uint160
|
||||
|
||||
err := json.Unmarshal(data, aux)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nonce, err = strconv.ParseUint(aux.Nonce, 16, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextC, err = address.StringToUint160(aux.NextConsensus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Version = aux.Version
|
||||
b.PrevHash = aux.PrevHash
|
||||
b.MerkleRoot = aux.MerkleRoot
|
||||
b.Timestamp = aux.Timestamp
|
||||
b.Index = aux.Index
|
||||
b.ConsensusData = nonce
|
||||
b.NextConsensus = nextC
|
||||
b.Script = aux.Script
|
||||
if !aux.Hash.Equals(b.Hash()) {
|
||||
return errors.New("json 'hash' doesn't match block hash")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -188,6 +188,8 @@ func TestBinBlockDecodeEncode(t *testing.T) {
|
|||
data, err := testserdes.EncodeBinary(&b)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, rawtx, hex.EncodeToString(data))
|
||||
|
||||
testserdes.MarshalUnmarshalJSON(t, &b, new(Block))
|
||||
}
|
||||
|
||||
func TestBlockSizeCalculation(t *testing.T) {
|
||||
|
|
|
@ -13,9 +13,11 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/dao"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/mempool"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
|
@ -30,7 +32,7 @@ import (
|
|||
// Tuning parameters.
|
||||
const (
|
||||
headerBatchCount = 2000
|
||||
version = "0.0.9"
|
||||
version = "0.0.10"
|
||||
|
||||
// This one comes from C# code and it's different from the constant used
|
||||
// when creating an asset with Neo.Asset.Create interop call. It looks
|
||||
|
@ -63,7 +65,9 @@ var (
|
|||
persistInterval = 1 * time.Second
|
||||
)
|
||||
|
||||
// Blockchain represents the blockchain.
|
||||
// Blockchain represents the blockchain. It maintans internal state representing
|
||||
// the state of the ledger that can be accessed in various ways and changed by
|
||||
// adding new blocks or headers.
|
||||
type Blockchain struct {
|
||||
config config.ProtocolConfiguration
|
||||
|
||||
|
@ -99,6 +103,7 @@ type Blockchain struct {
|
|||
|
||||
generationAmount []int
|
||||
decrementInterval int
|
||||
noBonusHeight uint32
|
||||
|
||||
// All operations on headerList must be called from an
|
||||
// headersOp to be routine safe.
|
||||
|
@ -122,12 +127,27 @@ type Blockchain struct {
|
|||
log *zap.Logger
|
||||
|
||||
lastBatch *storage.MemBatch
|
||||
|
||||
// Notification subsystem.
|
||||
events chan bcEvent
|
||||
subCh chan interface{}
|
||||
unsubCh chan interface{}
|
||||
}
|
||||
|
||||
// bcEvent is an internal event generated by the Blockchain and then
|
||||
// broadcasted to other parties. It joins the new block and associated
|
||||
// invocation logs, all the other events visible from outside can be produced
|
||||
// from this combination.
|
||||
type bcEvent struct {
|
||||
block *block.Block
|
||||
appExecResults []*state.AppExecResult
|
||||
}
|
||||
|
||||
type headersOpFunc func(headerList *HeaderHashList)
|
||||
|
||||
// NewBlockchain returns a new blockchain object the will use the
|
||||
// given Store as its underlying storage.
|
||||
// given Store as its underlying storage. For it to work correctly you need
|
||||
// to spawn a goroutine for its Run method after this initialization.
|
||||
func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.Logger) (*Blockchain, error) {
|
||||
if log == nil {
|
||||
return nil, errors.New("empty logger")
|
||||
|
@ -137,18 +157,14 @@ func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.L
|
|||
cfg.MemPoolSize = defaultMemPoolSize
|
||||
log.Info("mempool size is not set or wrong, setting default value", zap.Int("MemPoolSize", cfg.MemPoolSize))
|
||||
}
|
||||
if cfg.MaxTransactionsPerBlock <= 0 {
|
||||
cfg.MaxTransactionsPerBlock = 0
|
||||
log.Info("MaxTransactionsPerBlock is not set or wrong, setting default value (unlimited)", zap.Int("MaxTransactionsPerBlock", cfg.MaxTransactionsPerBlock))
|
||||
}
|
||||
if cfg.MaxFreeTransactionsPerBlock <= 0 {
|
||||
cfg.MaxFreeTransactionsPerBlock = 0
|
||||
log.Info("MaxFreeTransactionsPerBlock is not set or wrong, setting default value (unlimited)", zap.Int("MaxFreeTransactionsPerBlock", cfg.MaxFreeTransactionsPerBlock))
|
||||
}
|
||||
if cfg.MaxFreeTransactionSize <= 0 {
|
||||
cfg.MaxFreeTransactionSize = 0
|
||||
log.Info("MaxFreeTransactionSize is not set or wrong, setting default value (unlimited)", zap.Int("MaxFreeTransactionSize", cfg.MaxFreeTransactionSize))
|
||||
}
|
||||
if cfg.MinimumNetworkFee < 0 {
|
||||
cfg.MinimumNetworkFee = 0
|
||||
log.Info("MinimumNetworkFee is not set or wrong, setting default value (0)", zap.String("MinimumNetworkFee", cfg.MinimumNetworkFee.String()))
|
||||
}
|
||||
if cfg.FeePerExtraByte <= 0 {
|
||||
cfg.FeePerExtraByte = 0
|
||||
log.Info("FeePerExtraByte is not set or wrong, setting default value", zap.Float64("FeePerExtraByte", cfg.FeePerExtraByte))
|
||||
|
@ -163,9 +179,13 @@ func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.L
|
|||
memPool: mempool.NewMemPool(cfg.MemPoolSize),
|
||||
keyCache: make(map[util.Uint160]map[string]*keys.PublicKey),
|
||||
log: log,
|
||||
events: make(chan bcEvent),
|
||||
subCh: make(chan interface{}),
|
||||
unsubCh: make(chan interface{}),
|
||||
|
||||
generationAmount: genAmount,
|
||||
decrementInterval: decrementInterval,
|
||||
noBonusHeight: cfg.NoBonusHeight,
|
||||
}
|
||||
|
||||
if err := bc.init(); err != nil {
|
||||
|
@ -192,6 +212,11 @@ func (bc *Blockchain) init() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bc.config.EnableStateRoot {
|
||||
if err := bc.dao.InitMPT(0, bc.config.KeepOnlyLatestState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return bc.storeBlock(genesisBlock)
|
||||
}
|
||||
if ver != version {
|
||||
|
@ -209,6 +234,11 @@ func (bc *Blockchain) init() error {
|
|||
}
|
||||
bc.blockHeight = bHeight
|
||||
bc.persistedHeight = bHeight
|
||||
if bc.config.EnableStateRoot {
|
||||
if err = bc.dao.InitMPT(bHeight, bc.config.KeepOnlyLatestState); err != nil {
|
||||
return errors.Wrapf(err, "can't init MPT at height %d", bHeight)
|
||||
}
|
||||
}
|
||||
|
||||
hashes, err := bc.dao.GetHeaderHashes()
|
||||
if err != nil {
|
||||
|
@ -264,11 +294,13 @@ func (bc *Blockchain) init() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Run runs chain loop.
|
||||
// Run runs chain loop, it needs to be run as goroutine and executing it is
|
||||
// critical for correct Blockchain operation.
|
||||
func (bc *Blockchain) Run() {
|
||||
persistTimer := time.NewTimer(persistInterval)
|
||||
defer func() {
|
||||
persistTimer.Stop()
|
||||
bc.addLock.Lock() // Prevent changing state, but do not release the lock, we're about to exit.
|
||||
if err := bc.persist(); err != nil {
|
||||
bc.log.Warn("failed to persist", zap.Error(err))
|
||||
}
|
||||
|
@ -277,6 +309,7 @@ func (bc *Blockchain) Run() {
|
|||
}
|
||||
close(bc.runToExitCh)
|
||||
}()
|
||||
go bc.notificationDispatcher()
|
||||
for {
|
||||
select {
|
||||
case <-bc.stopCh:
|
||||
|
@ -296,6 +329,82 @@ func (bc *Blockchain) Run() {
|
|||
}
|
||||
}
|
||||
|
||||
// notificationDispatcher manages subscription to events and broadcasts new events.
|
||||
func (bc *Blockchain) notificationDispatcher() {
|
||||
var (
|
||||
// These are just sets of subscribers, though modelled as maps
|
||||
// for ease of management (not a lot of subscriptions is really
|
||||
// expected, but maps are convenient for adding/deleting elements).
|
||||
blockFeed = make(map[chan<- *block.Block]bool)
|
||||
txFeed = make(map[chan<- *transaction.Transaction]bool)
|
||||
notificationFeed = make(map[chan<- *state.NotificationEvent]bool)
|
||||
executionFeed = make(map[chan<- *state.AppExecResult]bool)
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case <-bc.stopCh:
|
||||
return
|
||||
case sub := <-bc.subCh:
|
||||
switch ch := sub.(type) {
|
||||
case chan<- *block.Block:
|
||||
blockFeed[ch] = true
|
||||
case chan<- *transaction.Transaction:
|
||||
txFeed[ch] = true
|
||||
case chan<- *state.NotificationEvent:
|
||||
notificationFeed[ch] = true
|
||||
case chan<- *state.AppExecResult:
|
||||
executionFeed[ch] = true
|
||||
default:
|
||||
panic(fmt.Sprintf("bad subscription: %T", sub))
|
||||
}
|
||||
case unsub := <-bc.unsubCh:
|
||||
switch ch := unsub.(type) {
|
||||
case chan<- *block.Block:
|
||||
delete(blockFeed, ch)
|
||||
case chan<- *transaction.Transaction:
|
||||
delete(txFeed, ch)
|
||||
case chan<- *state.NotificationEvent:
|
||||
delete(notificationFeed, ch)
|
||||
case chan<- *state.AppExecResult:
|
||||
delete(executionFeed, ch)
|
||||
default:
|
||||
panic(fmt.Sprintf("bad unsubscription: %T", unsub))
|
||||
}
|
||||
case event := <-bc.events:
|
||||
// We don't want to waste time looping through transactions when there are no
|
||||
// subscribers.
|
||||
if len(txFeed) != 0 || len(notificationFeed) != 0 || len(executionFeed) != 0 {
|
||||
var aerIdx int
|
||||
for _, tx := range event.block.Transactions {
|
||||
if tx.Type == transaction.InvocationType {
|
||||
aer := event.appExecResults[aerIdx]
|
||||
if !aer.TxHash.Equals(tx.Hash()) {
|
||||
panic("inconsistent application execution results")
|
||||
}
|
||||
aerIdx++
|
||||
for ch := range executionFeed {
|
||||
ch <- aer
|
||||
}
|
||||
if aer.VMState == "HALT" {
|
||||
for i := range aer.Events {
|
||||
for ch := range notificationFeed {
|
||||
ch <- &aer.Events[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for ch := range txFeed {
|
||||
ch <- tx
|
||||
}
|
||||
}
|
||||
}
|
||||
for ch := range blockFeed {
|
||||
ch <- event.block
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close stops Blockchain's internal loop, syncs changes to persistent storage
|
||||
// and closes it. The Blockchain is no longer functional after the call to Close.
|
||||
func (bc *Blockchain) Close() {
|
||||
|
@ -446,20 +555,38 @@ func (bc *Blockchain) processHeader(h *block.Header, batch storage.Batch, header
|
|||
return nil
|
||||
}
|
||||
|
||||
// bc.GetHeaderHash(int(endHeight)) returns sum of all system fees for blocks up to h.
|
||||
// GetSystemFeeAmount returns sum of all system fees for blocks up to h.
|
||||
// and 0 if no such block exists.
|
||||
func (bc *Blockchain) getSystemFeeAmount(h util.Uint256) uint32 {
|
||||
func (bc *Blockchain) GetSystemFeeAmount(h util.Uint256) uint32 {
|
||||
_, sf, _ := bc.dao.GetBlock(h)
|
||||
return sf
|
||||
}
|
||||
|
||||
// GetStateProof returns proof of having key in the MPT with the specified root.
|
||||
func (bc *Blockchain) GetStateProof(root util.Uint256, key []byte) ([][]byte, error) {
|
||||
if !bc.config.EnableStateRoot {
|
||||
return nil, errors.New("state root feature is not enabled")
|
||||
}
|
||||
tr := mpt.NewTrie(mpt.NewHashNode(root), bc.config.KeepOnlyLatestState, storage.NewMemCachedStore(bc.dao.Store))
|
||||
return tr.GetProof(key)
|
||||
}
|
||||
|
||||
// GetStateRoot returns state root for a given height.
|
||||
func (bc *Blockchain) GetStateRoot(height uint32) (*state.MPTRootState, error) {
|
||||
if !bc.config.EnableStateRoot {
|
||||
return nil, errors.New("state root feature is not enabled")
|
||||
}
|
||||
return bc.dao.GetStateRoot(height)
|
||||
}
|
||||
|
||||
// TODO: storeBlock needs some more love, its implemented as in the original
|
||||
// project. This for the sake of development speed and understanding of what
|
||||
// is happening here, quite allot as you can see :). If things are wired together
|
||||
// and all tests are in place, we can make a more optimized and cleaner implementation.
|
||||
func (bc *Blockchain) storeBlock(block *block.Block) error {
|
||||
cache := dao.NewCached(bc.dao)
|
||||
fee := bc.getSystemFeeAmount(block.PrevHash)
|
||||
appExecResults := make([]*state.AppExecResult, 0, len(block.Transactions))
|
||||
fee := bc.GetSystemFeeAmount(block.PrevHash)
|
||||
for _, tx := range block.Transactions {
|
||||
fee += uint32(bc.SystemFee(tx).IntegralValue())
|
||||
}
|
||||
|
@ -481,7 +608,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
}
|
||||
|
||||
// Process TX outputs.
|
||||
if err := processOutputs(tx, cache); err != nil {
|
||||
if err := processOutputs(tx, block, cache); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -502,6 +629,9 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
unspent.States[input.PrevIndex].State |= state.CoinSpent
|
||||
unspent.States[input.PrevIndex].SpendHeight = block.Index
|
||||
prevTXOutput := &unspent.States[input.PrevIndex].Output
|
||||
if err := processTransfer(cache, tx, block, prevTXOutput, true); err != nil {
|
||||
return err
|
||||
}
|
||||
account, err := cache.GetAccountStateOrNew(prevTXOutput.ScriptHash)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -562,6 +692,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
Precision: t.Precision,
|
||||
Owner: t.Owner,
|
||||
Admin: t.Admin,
|
||||
Issuer: t.Admin,
|
||||
Expiration: bc.BlockHeight() + registeredAssetLifetime,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -661,8 +792,9 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
v.SetCheckedHash(tx.VerificationHash().BytesBE())
|
||||
v.LoadScript(t.Script)
|
||||
v.SetPriceGetter(getPrice)
|
||||
if bc.config.FreeGasLimit > 0 {
|
||||
v.SetGasLimit(bc.config.FreeGasLimit + t.Gas)
|
||||
gasLimit := bc.config.GetFreeGas(block.Index)
|
||||
if gasLimit > 0 {
|
||||
v.SetGasLimit(gasLimit + t.Gas)
|
||||
}
|
||||
|
||||
err := v.Run()
|
||||
|
@ -671,32 +803,14 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
if err != nil {
|
||||
return errors.Wrap(err, "failed to persist invocation results")
|
||||
}
|
||||
var index uint32
|
||||
for _, note := range systemInterop.notifications {
|
||||
arr, ok := note.Item.Value().([]vm.StackItem)
|
||||
if !ok || len(arr) != 4 {
|
||||
transfer, err := state.NEP5TransferFromNotification(note, tx.Hash(), block.Index, block.Timestamp, index)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
op, ok := arr[0].Value().([]byte)
|
||||
if !ok || string(op) != "transfer" {
|
||||
continue
|
||||
}
|
||||
from, ok := arr[1].Value().([]byte)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
to, ok := arr[2].Value().([]byte)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
amount, ok := arr[3].Value().(*big.Int)
|
||||
if !ok {
|
||||
bs, ok := arr[3].Value().([]byte)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
amount = emit.BytesToInt(bs)
|
||||
}
|
||||
bc.processNEP5Transfer(cache, tx, block, note.ScriptHash, from, to, amount.Int64())
|
||||
bc.processNEP5Transfer(cache, transfer)
|
||||
index++
|
||||
}
|
||||
} else {
|
||||
bc.log.Warn("contract invocation failed",
|
||||
|
@ -712,109 +826,219 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
|
|||
Stack: v.Estack().ToContractParameters(),
|
||||
Events: systemInterop.notifications,
|
||||
}
|
||||
appExecResults = append(appExecResults, aer)
|
||||
err = cache.PutAppExecResult(aer)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to Store notifications")
|
||||
}
|
||||
}
|
||||
}
|
||||
bc.lock.Lock()
|
||||
defer bc.lock.Unlock()
|
||||
|
||||
if bc.config.EnableStateRoot {
|
||||
root := bc.dao.MPT.StateRoot()
|
||||
var prevHash util.Uint256
|
||||
if block.Index > 0 {
|
||||
prev, err := bc.dao.GetStateRoot(block.Index - 1)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "can't get previous state root")
|
||||
}
|
||||
prevHash = hash.DoubleSha256(prev.GetSignedPart())
|
||||
}
|
||||
err := bc.AddStateRoot(&state.MPTRoot{
|
||||
MPTRootBase: state.MPTRootBase{
|
||||
Index: block.Index,
|
||||
PrevHash: prevHash,
|
||||
Root: root,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if bc.config.SaveStorageBatch {
|
||||
bc.lastBatch = cache.DAO.GetBatch()
|
||||
}
|
||||
|
||||
bc.lock.Lock()
|
||||
_, err := cache.Persist()
|
||||
if err != nil {
|
||||
bc.lock.Unlock()
|
||||
return err
|
||||
}
|
||||
if bc.config.EnableStateRoot {
|
||||
bc.dao.MPT.Flush()
|
||||
// Every persist cycle we also compact our in-memory MPT.
|
||||
persistedHeight := atomic.LoadUint32(&bc.persistedHeight)
|
||||
if persistedHeight == block.Index-1 {
|
||||
// 10 is good and roughly estimated to fit remaining trie into 1M of memory.
|
||||
bc.dao.MPT.Collapse(10)
|
||||
}
|
||||
}
|
||||
bc.topBlock.Store(block)
|
||||
atomic.StoreUint32(&bc.blockHeight, block.Index)
|
||||
bc.memPool.RemoveStale(bc.isTxStillRelevant, block.Index)
|
||||
bc.lock.Unlock()
|
||||
|
||||
updateBlockHeightMetric(block.Index)
|
||||
bc.memPool.RemoveStale(bc.isTxStillRelevant)
|
||||
// Genesis block is stored when Blockchain is not yet running, so there
|
||||
// is no one to read this event. And it doesn't make much sense as event
|
||||
// anyway.
|
||||
if block.Index != 0 {
|
||||
bc.events <- bcEvent{block, appExecResults}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseUint160(addr []byte) util.Uint160 {
|
||||
if u, err := util.Uint160DecodeBytesBE(addr); err == nil {
|
||||
return u
|
||||
func appendSingleTransfer(cache *dao.Cached, acc util.Uint160, tr *state.Transfer) error {
|
||||
index, err := cache.GetNextTransferBatch(acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return util.Uint160{}
|
||||
isBig, err := cache.AppendTransfer(acc, index, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBig {
|
||||
if err := cache.PutNextTransferBatch(acc, index+1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *Blockchain) processNEP5Transfer(cache *dao.Cached, tx *transaction.Transaction, b *block.Block, sc util.Uint160, from, to []byte, amount int64) {
|
||||
toAddr := parseUint160(to)
|
||||
fromAddr := parseUint160(from)
|
||||
transfer := &state.NEP5Transfer{
|
||||
Asset: sc,
|
||||
From: fromAddr,
|
||||
To: toAddr,
|
||||
Block: b.Index,
|
||||
Timestamp: b.Timestamp,
|
||||
Tx: tx.Hash(),
|
||||
// processTransfer processes single UTXO transfer. Totals is a slice of neo (0) and gas (1) total transfer amount.
|
||||
func processTransfer(cache *dao.Cached, tx *transaction.Transaction, b *block.Block, out *transaction.Output,
|
||||
isSent bool) error {
|
||||
isGoverning := out.AssetID.Equals(GoverningTokenID())
|
||||
if !isGoverning && !out.AssetID.Equals(UtilityTokenID()) {
|
||||
return nil
|
||||
}
|
||||
if !fromAddr.Equals(util.Uint160{}) {
|
||||
balances, err := cache.GetNEP5Balances(fromAddr)
|
||||
var amount = int64(out.Amount)
|
||||
// NEO has no fractional part and Fixed8 representation is just misleading here.
|
||||
if isGoverning {
|
||||
amount = out.Amount.IntegralValue()
|
||||
}
|
||||
tr := &state.Transfer{
|
||||
IsGoverning: isGoverning,
|
||||
IsSent: isSent,
|
||||
Amount: amount,
|
||||
Block: b.Index,
|
||||
Timestamp: b.Timestamp,
|
||||
Tx: tx.Hash(),
|
||||
}
|
||||
index, err := cache.GetNextTransferBatch(out.ScriptHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isBig, err := cache.AppendTransfer(out.ScriptHash, index, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBig {
|
||||
return cache.PutNextTransferBatch(out.ScriptHash, index+1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *Blockchain) processNEP5Transfer(cache *dao.Cached, transfer *state.NEP5Transfer) {
|
||||
if !transfer.From.Equals(util.Uint160{}) {
|
||||
balances, err := cache.GetNEP5Balances(transfer.From)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs := balances.Trackers[sc]
|
||||
bs.Balance -= amount
|
||||
bs.LastUpdatedBlock = b.Index
|
||||
balances.Trackers[sc] = bs
|
||||
bs := balances.Trackers[transfer.Asset]
|
||||
if bs.Balance != nil {
|
||||
bs.Balance.Sub(bs.Balance, transfer.Amount)
|
||||
if bs.Balance.Sign() > 0 {
|
||||
bs.LastUpdatedBlock = transfer.Block
|
||||
balances.Trackers[transfer.Asset] = bs
|
||||
} else {
|
||||
delete(balances.Trackers, transfer.Asset)
|
||||
}
|
||||
}
|
||||
|
||||
transfer.Amount = -amount
|
||||
isBig, err := cache.AppendNEP5Transfer(fromAddr, balances.NextTransferBatch, transfer)
|
||||
transfer.Amount.Neg(transfer.Amount)
|
||||
isBig, err := cache.AppendNEP5Transfer(transfer.From, balances.NextTransferBatch, transfer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
transfer.Amount.Neg(transfer.Amount)
|
||||
if isBig {
|
||||
balances.NextTransferBatch++
|
||||
}
|
||||
if err := cache.PutNEP5Balances(transfer.From, balances); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if !transfer.To.Equals(util.Uint160{}) {
|
||||
balances, err := cache.GetNEP5Balances(transfer.To)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs := balances.Trackers[transfer.Asset]
|
||||
if bs.Balance == nil {
|
||||
bs.Balance = new(big.Int)
|
||||
}
|
||||
bs.Balance.Add(bs.Balance, transfer.Amount)
|
||||
bs.LastUpdatedBlock = transfer.Block
|
||||
balances.Trackers[transfer.Asset] = bs
|
||||
|
||||
isBig, err := cache.AppendNEP5Transfer(transfer.To, balances.NextTransferBatch, transfer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if isBig {
|
||||
balances.NextTransferBatch++
|
||||
}
|
||||
if err := cache.PutNEP5Balances(fromAddr, balances); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if !toAddr.Equals(util.Uint160{}) {
|
||||
balances, err := cache.GetNEP5Balances(toAddr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs := balances.Trackers[sc]
|
||||
bs.Balance += amount
|
||||
bs.LastUpdatedBlock = b.Index
|
||||
balances.Trackers[sc] = bs
|
||||
|
||||
transfer.Amount = amount
|
||||
isBig, err := cache.AppendNEP5Transfer(toAddr, balances.NextTransferBatch, transfer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if isBig {
|
||||
balances.NextTransferBatch++
|
||||
}
|
||||
if err := cache.PutNEP5Balances(toAddr, balances); err != nil {
|
||||
if err := cache.PutNEP5Balances(transfer.To, balances); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetNEP5TransferLog returns NEP5 transfer log for the acc.
|
||||
func (bc *Blockchain) GetNEP5TransferLog(acc util.Uint160) *state.NEP5TransferLog {
|
||||
// ForEachTransfer executes f for each transfer in log.
|
||||
func (bc *Blockchain) ForEachTransfer(acc util.Uint160, tr *state.Transfer, f func() (bool, error)) error {
|
||||
nb, err := bc.dao.GetNextTransferBatch(acc)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
for i := int(nb); i >= 0; i-- {
|
||||
lg, err := bc.dao.GetTransferLog(acc, uint32(i))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
cont, err := lg.ForEach(state.TransferSize, tr, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !cont {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEachNEP5Transfer executes f for each nep5 transfer in log.
|
||||
func (bc *Blockchain) ForEachNEP5Transfer(acc util.Uint160, tr *state.NEP5Transfer, f func() (bool, error)) error {
|
||||
balances, err := bc.dao.GetNEP5Balances(acc)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
result := new(state.NEP5TransferLog)
|
||||
for i := uint32(0); i <= balances.NextTransferBatch; i++ {
|
||||
lg, err := bc.dao.GetNEP5TransferLog(acc, i)
|
||||
for i := int(balances.NextTransferBatch); i >= 0; i-- {
|
||||
lg, err := bc.dao.GetNEP5TransferLog(acc, uint32(i))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
result.Raw = append(result.Raw, lg.Raw...)
|
||||
cont, err := lg.ForEach(state.NEP5TransferSize, tr, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !cont {
|
||||
break
|
||||
}
|
||||
}
|
||||
return result
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNEP5Balances returns NEP5 balances for the acc.
|
||||
|
@ -826,13 +1050,19 @@ func (bc *Blockchain) GetNEP5Balances(acc util.Uint160) *state.NEP5Balances {
|
|||
return bs
|
||||
}
|
||||
|
||||
// GetNEP5Metadata returns NEP5 metadata for the contract h.
|
||||
// Note: it is currently saved only for migrated contracts.
|
||||
func (bc *Blockchain) GetNEP5Metadata(h util.Uint160) (*state.NEP5Metadata, error) {
|
||||
return bc.dao.GetNEP5Metadata(h)
|
||||
}
|
||||
|
||||
// LastBatch returns last persisted storage batch.
|
||||
func (bc *Blockchain) LastBatch() *storage.MemBatch {
|
||||
return bc.lastBatch
|
||||
}
|
||||
|
||||
// processOutputs processes transaction outputs.
|
||||
func processOutputs(tx *transaction.Transaction, dao *dao.Cached) error {
|
||||
func processOutputs(tx *transaction.Transaction, b *block.Block, dao *dao.Cached) error {
|
||||
for index, output := range tx.Outputs {
|
||||
account, err := dao.GetAccountStateOrNew(output.ScriptHash)
|
||||
if err != nil {
|
||||
|
@ -849,6 +1079,9 @@ func processOutputs(tx *transaction.Transaction, dao *dao.Cached) error {
|
|||
if err = processTXWithValidatorsAdd(&output, account, dao); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = processTransfer(dao, tx, b, &output, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1018,10 +1251,10 @@ func (bc *Blockchain) headerListLen() (n int) {
|
|||
return
|
||||
}
|
||||
|
||||
// GetTransaction returns a TX and its height by the given hash.
|
||||
// GetTransaction returns a TX and its height by the given hash. The height is MaxUint32 if tx is in the mempool.
|
||||
func (bc *Blockchain) GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error) {
|
||||
if tx, _, ok := bc.memPool.TryGetValue(hash); ok {
|
||||
return tx, 0, nil // the height is not actually defined for memPool transaction. Not sure if zero is a good number in this case.
|
||||
return tx, math.MaxUint32, nil // the height is not actually defined for memPool transaction.
|
||||
}
|
||||
return bc.dao.GetTransaction(hash)
|
||||
}
|
||||
|
@ -1039,7 +1272,16 @@ func (bc *Blockchain) GetStorageItem(scripthash util.Uint160, key []byte) *state
|
|||
|
||||
// GetStorageItems returns all storage items for a given scripthash.
|
||||
func (bc *Blockchain) GetStorageItems(hash util.Uint160) (map[string]*state.StorageItem, error) {
|
||||
return bc.dao.GetStorageItems(hash)
|
||||
siMap, err := bc.dao.GetStorageItems(hash, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := make(map[string]*state.StorageItem)
|
||||
for i := range siMap {
|
||||
val := siMap[i].StorageItem
|
||||
m[string(siMap[i].Key)] = &val
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// GetBlock returns a Block by the given hash.
|
||||
|
@ -1179,6 +1421,68 @@ func (bc *Blockchain) GetConfig() config.ProtocolConfiguration {
|
|||
return bc.config
|
||||
}
|
||||
|
||||
// SubscribeForBlocks adds given channel to new block event broadcasting, so when
|
||||
// there is a new block added to the chain you'll receive it via this channel.
|
||||
// Make sure it's read from regularly as not reading these events might affect
|
||||
// other Blockchain functions.
|
||||
func (bc *Blockchain) SubscribeForBlocks(ch chan<- *block.Block) {
|
||||
bc.subCh <- ch
|
||||
}
|
||||
|
||||
// SubscribeForTransactions adds given channel to new transaction event
|
||||
// broadcasting, so when there is a new transaction added to the chain (in a
|
||||
// block) you'll receive it via this channel. Make sure it's read from regularly
|
||||
// as not reading these events might affect other Blockchain functions.
|
||||
func (bc *Blockchain) SubscribeForTransactions(ch chan<- *transaction.Transaction) {
|
||||
bc.subCh <- ch
|
||||
}
|
||||
|
||||
// SubscribeForNotifications adds given channel to new notifications event
|
||||
// broadcasting, so when an in-block transaction execution generates a
|
||||
// notification you'll receive it via this channel. Only notifications from
|
||||
// successful transactions are broadcasted, if you're interested in failed
|
||||
// transactions use SubscribeForExecutions instead. Make sure this channel is
|
||||
// read from regularly as not reading these events might affect other Blockchain
|
||||
// functions.
|
||||
func (bc *Blockchain) SubscribeForNotifications(ch chan<- *state.NotificationEvent) {
|
||||
bc.subCh <- ch
|
||||
}
|
||||
|
||||
// SubscribeForExecutions adds given channel to new transaction execution event
|
||||
// broadcasting, so when an in-block transaction execution happens you'll receive
|
||||
// the result of it via this channel. Make sure it's read from regularly as not
|
||||
// reading these events might affect other Blockchain functions.
|
||||
func (bc *Blockchain) SubscribeForExecutions(ch chan<- *state.AppExecResult) {
|
||||
bc.subCh <- ch
|
||||
}
|
||||
|
||||
// UnsubscribeFromBlocks unsubscribes given channel from new block notifications,
|
||||
// you can close it afterwards. Passing non-subscribed channel is a no-op.
|
||||
func (bc *Blockchain) UnsubscribeFromBlocks(ch chan<- *block.Block) {
|
||||
bc.unsubCh <- ch
|
||||
}
|
||||
|
||||
// UnsubscribeFromTransactions unsubscribes given channel from new transaction
|
||||
// notifications, you can close it afterwards. Passing non-subscribed channel is
|
||||
// a no-op.
|
||||
func (bc *Blockchain) UnsubscribeFromTransactions(ch chan<- *transaction.Transaction) {
|
||||
bc.unsubCh <- ch
|
||||
}
|
||||
|
||||
// UnsubscribeFromNotifications unsubscribes given channel from new
|
||||
// execution-generated notifications, you can close it afterwards. Passing
|
||||
// non-subscribed channel is a no-op.
|
||||
func (bc *Blockchain) UnsubscribeFromNotifications(ch chan<- *state.NotificationEvent) {
|
||||
bc.unsubCh <- ch
|
||||
}
|
||||
|
||||
// UnsubscribeFromExecutions unsubscribes given channel from new execution
|
||||
// notifications, you can close it afterwards. Passing non-subscribed channel is
|
||||
// a no-op.
|
||||
func (bc *Blockchain) UnsubscribeFromExecutions(ch chan<- *state.AppExecResult) {
|
||||
bc.unsubCh <- ch
|
||||
}
|
||||
|
||||
// CalculateClaimable calculates the amount of GAS which can be claimed for a transaction with value.
|
||||
// First return value is GAS generated between startHeight and endHeight.
|
||||
// Second return value is GAS returned from accumulated SystemFees between startHeight and endHeight.
|
||||
|
@ -1187,9 +1491,14 @@ func (bc *Blockchain) CalculateClaimable(value util.Fixed8, startHeight, endHeig
|
|||
di := uint32(bc.decrementInterval)
|
||||
|
||||
ustart := startHeight / di
|
||||
if genSize := uint32(len(bc.generationAmount)); ustart < genSize {
|
||||
uend := endHeight / di
|
||||
iend := endHeight % di
|
||||
genSize := uint32(len(bc.generationAmount))
|
||||
if ustart < genSize && (bc.noBonusHeight == 0 || startHeight < bc.noBonusHeight) {
|
||||
endHeightMin := endHeight
|
||||
if bc.noBonusHeight != 0 && endHeightMin > bc.noBonusHeight {
|
||||
endHeightMin = bc.noBonusHeight
|
||||
}
|
||||
uend := endHeightMin / di
|
||||
iend := endHeightMin % di
|
||||
if uend >= genSize {
|
||||
uend = genSize - 1
|
||||
iend = di
|
||||
|
@ -1212,9 +1521,9 @@ func (bc *Blockchain) CalculateClaimable(value util.Fixed8, startHeight, endHeig
|
|||
startHeight++
|
||||
}
|
||||
h := bc.GetHeaderHash(int(startHeight - 1))
|
||||
feeStart := bc.getSystemFeeAmount(h)
|
||||
feeStart := bc.GetSystemFeeAmount(h)
|
||||
h = bc.GetHeaderHash(int(endHeight - 1))
|
||||
feeEnd := bc.getSystemFeeAmount(h)
|
||||
feeEnd := bc.GetSystemFeeAmount(h)
|
||||
|
||||
sysFeeTotal := util.Fixed8(feeEnd - feeStart)
|
||||
ratio := value / 100000000
|
||||
|
@ -1286,11 +1595,44 @@ func (bc *Blockchain) NetworkFee(t *transaction.Transaction) util.Fixed8 {
|
|||
|
||||
// SystemFee returns system fee.
|
||||
func (bc *Blockchain) SystemFee(t *transaction.Transaction) util.Fixed8 {
|
||||
if t.Type == transaction.InvocationType {
|
||||
switch t.Type {
|
||||
case transaction.InvocationType:
|
||||
inv := t.Data.(*transaction.InvocationTX)
|
||||
if inv.Version >= 1 {
|
||||
return inv.Gas
|
||||
return inv.Gas
|
||||
case transaction.IssueType:
|
||||
if t.Version >= 1 {
|
||||
return util.Fixed8(0)
|
||||
}
|
||||
var iszero = true
|
||||
for i := range t.Outputs {
|
||||
asset := t.Outputs[i].AssetID
|
||||
if asset != UtilityTokenID() && asset != GoverningTokenID() {
|
||||
iszero = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if iszero {
|
||||
return util.Fixed8(0)
|
||||
}
|
||||
case transaction.RegisterType:
|
||||
reg := t.Data.(*transaction.RegisterTX)
|
||||
if reg.AssetType == transaction.GoverningToken || reg.AssetType == transaction.UtilityToken {
|
||||
return util.Fixed8(0)
|
||||
}
|
||||
case transaction.StateType:
|
||||
res := util.Fixed8(0)
|
||||
st := t.Data.(*transaction.StateTX)
|
||||
for _, desc := range st.Descriptors {
|
||||
if desc.Type == transaction.Validator && desc.Field == "Registered" {
|
||||
for i := range desc.Value {
|
||||
if desc.Value[i] != 0 {
|
||||
res += util.Fixed8FromInt64(1000)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
return bc.GetConfig().SystemFee.TryGetValue(t.Type)
|
||||
}
|
||||
|
@ -1309,16 +1651,18 @@ func (bc *Blockchain) GetMemPool() *mempool.Pool {
|
|||
// ApplyPolicyToTxSet applies configured policies to given transaction set. It
|
||||
// expects slice to be ordered by fee and returns a subslice of it.
|
||||
func (bc *Blockchain) ApplyPolicyToTxSet(txes []mempool.TxWithFee) []mempool.TxWithFee {
|
||||
if bc.config.MaxTransactionsPerBlock != 0 && len(txes) > bc.config.MaxTransactionsPerBlock {
|
||||
txes = txes[:bc.config.MaxTransactionsPerBlock]
|
||||
maxTx := bc.config.GetMaxTxPerBlock(bc.BlockHeight())
|
||||
if maxTx != 0 && len(txes) > maxTx {
|
||||
txes = txes[:maxTx]
|
||||
}
|
||||
maxFree := bc.config.MaxFreeTransactionsPerBlock
|
||||
if maxFree != 0 {
|
||||
lowStart := sort.Search(len(txes), func(i int) bool {
|
||||
return bc.IsLowPriority(txes[i].Fee)
|
||||
maxFree := bc.config.GetMaxFreeTxPerBlock(bc.BlockHeight())
|
||||
if maxFree != 0 && len(txes) > maxFree {
|
||||
// Transactions are sorted by fee, so we just find the first free one.
|
||||
freeStart := sort.Search(len(txes), func(i int) bool {
|
||||
return txes[i].Fee == 0
|
||||
})
|
||||
if lowStart+maxFree < len(txes) {
|
||||
txes = txes[:lowStart+maxFree]
|
||||
if freeStart+maxFree < len(txes) {
|
||||
txes = txes[:freeStart+maxFree]
|
||||
}
|
||||
}
|
||||
return txes
|
||||
|
@ -1553,6 +1897,90 @@ func (bc *Blockchain) isTxStillRelevant(t *transaction.Transaction) bool {
|
|||
|
||||
}
|
||||
|
||||
// StateHeight returns height of the verified state root.
|
||||
func (bc *Blockchain) StateHeight() uint32 {
|
||||
h, _ := bc.dao.GetCurrentStateRootHeight()
|
||||
return h
|
||||
}
|
||||
|
||||
// AddStateRoot add new (possibly unverified) state root to the blockchain.
|
||||
func (bc *Blockchain) AddStateRoot(r *state.MPTRoot) error {
|
||||
if !bc.config.EnableStateRoot {
|
||||
bc.log.Warn("state root is being added but not enabled in config")
|
||||
return nil
|
||||
}
|
||||
our, err := bc.GetStateRoot(r.Index)
|
||||
if err == nil {
|
||||
if our.Flag == state.Verified {
|
||||
return bc.updateStateHeight(r.Index)
|
||||
} else if r.Witness == nil && our.Witness != nil {
|
||||
r.Witness = our.Witness
|
||||
}
|
||||
}
|
||||
if err := bc.verifyStateRoot(r); err != nil {
|
||||
return errors.WithMessage(err, "invalid state root")
|
||||
}
|
||||
if r.Index > bc.BlockHeight() { // just put it into the store for future checks
|
||||
return bc.dao.PutStateRoot(&state.MPTRootState{
|
||||
MPTRoot: *r,
|
||||
Flag: state.Unverified,
|
||||
})
|
||||
}
|
||||
|
||||
flag := state.Unverified
|
||||
if r.Witness != nil {
|
||||
if err := bc.verifyStateRootWitness(r); err != nil {
|
||||
return errors.WithMessage(err, "can't verify signature")
|
||||
}
|
||||
flag = state.Verified
|
||||
}
|
||||
err = bc.dao.PutStateRoot(&state.MPTRootState{
|
||||
MPTRoot: *r,
|
||||
Flag: flag,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bc.updateStateHeight(r.Index)
|
||||
}
|
||||
|
||||
func (bc *Blockchain) updateStateHeight(newHeight uint32) error {
|
||||
h, err := bc.dao.GetCurrentStateRootHeight()
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "can't get current state root height")
|
||||
} else if (h < bc.config.StateRootEnableIndex && newHeight == bc.config.StateRootEnableIndex) || newHeight == h+1 {
|
||||
updateStateHeightMetric(newHeight)
|
||||
return bc.dao.PutCurrentStateRootHeight(newHeight)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyStateRoot checks if state root is valid.
|
||||
func (bc *Blockchain) verifyStateRoot(r *state.MPTRoot) error {
|
||||
if r.Index == 0 {
|
||||
return nil
|
||||
}
|
||||
prev, err := bc.GetStateRoot(r.Index - 1)
|
||||
if err != nil {
|
||||
return errors.New("can't get previous state root")
|
||||
} else if !r.PrevHash.Equals(hash.DoubleSha256(prev.GetSignedPart())) {
|
||||
return errors.New("previous hash mismatch")
|
||||
} else if prev.Version != r.Version {
|
||||
return errors.New("version mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyStateRootWitness verifies that state root signature is correct.
|
||||
func (bc *Blockchain) verifyStateRootWitness(r *state.MPTRoot) error {
|
||||
b, err := bc.GetBlock(bc.GetHeaderHash(int(r.Index)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil)
|
||||
return bc.verifyHashAgainstScript(b.NextConsensus, r.Witness, hash.Sha256(r.GetSignedPart()), interopCtx, true)
|
||||
}
|
||||
|
||||
// VerifyTx verifies whether a transaction is bonafide or not. Block parameter
|
||||
// is used for easy interop access and can be omitted for transactions that are
|
||||
// not yet added into any block.
|
||||
|
@ -1578,13 +2006,16 @@ func (bc *Blockchain) PoolTx(t *transaction.Transaction) error {
|
|||
if t.Type != transaction.ClaimType {
|
||||
txSize := io.GetVarSize(t)
|
||||
maxFree := bc.config.MaxFreeTransactionSize
|
||||
netFee := bc.NetworkFee(t)
|
||||
if maxFree != 0 && txSize > maxFree {
|
||||
netFee := bc.NetworkFee(t)
|
||||
if bc.IsLowPriority(netFee) ||
|
||||
netFee < util.Fixed8FromFloat(bc.config.FeePerExtraByte)*util.Fixed8(txSize-maxFree) {
|
||||
netFee < (util.Fixed8FromFloat(bc.config.LowPriorityThreshold)+util.Fixed8FromFloat(bc.config.FeePerExtraByte)*util.Fixed8(txSize-maxFree)) {
|
||||
return ErrPolicy
|
||||
}
|
||||
}
|
||||
if t.Type == transaction.InvocationType && netFee < bc.config.MinimumNetworkFee {
|
||||
return ErrPolicy
|
||||
}
|
||||
}
|
||||
if err := bc.memPool.Add(t, bc); err != nil {
|
||||
switch err {
|
||||
|
@ -1853,23 +2284,14 @@ func (bc *Blockchain) GetEnrollments() ([]*state.Validator, error) {
|
|||
for _, validator := range validators {
|
||||
if validator.Registered {
|
||||
result = append(result, validator)
|
||||
continue
|
||||
}
|
||||
}
|
||||
for _, sBValidator := range uniqueSBValidators {
|
||||
isAdded := false
|
||||
for _, v := range result {
|
||||
if v.PublicKey == sBValidator {
|
||||
isAdded = true
|
||||
for _, sbValidator := range uniqueSBValidators {
|
||||
if validator.PublicKey.Equal(sbValidator) {
|
||||
result = append(result, validator)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isAdded {
|
||||
result = append(result, &state.Validator{
|
||||
PublicKey: sBValidator,
|
||||
Registered: false,
|
||||
Votes: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
@ -2000,8 +2422,8 @@ func (bc *Blockchain) GetScriptHashesForVerifying(t *transaction.Transaction) ([
|
|||
}
|
||||
|
||||
// GetTestVM returns a VM and a Store setup for a test run of some sort of code.
|
||||
func (bc *Blockchain) GetTestVM() *vm.VM {
|
||||
systemInterop := bc.newInteropContext(trigger.Application, bc.dao, nil, nil)
|
||||
func (bc *Blockchain) GetTestVM(tx *transaction.Transaction) *vm.VM {
|
||||
systemInterop := bc.newInteropContext(trigger.Application, bc.dao, nil, tx)
|
||||
vm := systemInterop.SpawnVM()
|
||||
vm.SetPriceGetter(getPrice)
|
||||
return vm
|
||||
|
@ -2088,7 +2510,6 @@ func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block
|
|||
return errors.Errorf("expected len(hashes) == len(witnesses). got: %d != %d", len(hashes), len(witnesses))
|
||||
}
|
||||
sort.Slice(hashes, func(i, j int) bool { return hashes[i].Less(hashes[j]) })
|
||||
sort.Slice(witnesses, func(i, j int) bool { return witnesses[i].ScriptHash().Less(witnesses[j].ScriptHash()) })
|
||||
interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t)
|
||||
for i := 0; i < len(hashes); i++ {
|
||||
err := bc.verifyHashAgainstScript(hashes[i], &witnesses[i], t.VerificationHash(), interopCtx, false)
|
||||
|
|
|
@ -2,13 +2,19 @@ package core
|
|||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -181,6 +187,7 @@ func TestGetClaimable(t *testing.T) {
|
|||
bc.generationAmount = []int{4, 3, 2, 1}
|
||||
bc.decrementInterval = 2
|
||||
_, err := bc.genBlocks(10)
|
||||
bc.noBonusHeight = 6 // stop right before `1`
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("first generation period", func(t *testing.T) {
|
||||
|
@ -200,16 +207,40 @@ func TestGetClaimable(t *testing.T) {
|
|||
t.Run("start from the 2-nd block", func(t *testing.T) {
|
||||
amount, sysfee, err := bc.CalculateClaimable(util.Fixed8FromInt64(1), 1, 7)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 4+3+3+2+2+1, amount)
|
||||
require.EqualValues(t, 4+3+3+2+2, amount)
|
||||
require.EqualValues(t, 0, sysfee)
|
||||
})
|
||||
|
||||
t.Run("end height after generation has ended", func(t *testing.T) {
|
||||
amount, sysfee, err := bc.CalculateClaimable(util.Fixed8FromInt64(1), 1, 10)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 4+3+3+2+2, amount)
|
||||
require.EqualValues(t, 0, sysfee)
|
||||
})
|
||||
|
||||
t.Run("end height after generation has ended, noBonusHeight is very big", func(t *testing.T) {
|
||||
bc.noBonusHeight = 20
|
||||
amount, sysfee, err := bc.CalculateClaimable(util.Fixed8FromInt64(1), 1, 10)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 4+3+3+2+2+1+1, amount)
|
||||
require.EqualValues(t, 0, sysfee)
|
||||
})
|
||||
|
||||
t.Run("end height after generation has ended, noBonusHeight is 0", func(t *testing.T) {
|
||||
bc.noBonusHeight = 0
|
||||
amount, sysfee, err := bc.CalculateClaimable(util.Fixed8FromInt64(1), 1, 10)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 4+3+3+2+2+1+1, amount)
|
||||
require.EqualValues(t, 0, sysfee)
|
||||
})
|
||||
|
||||
t.Run("noBonusHeight is not divisible by decrement interval", func(t *testing.T) {
|
||||
bc.noBonusHeight = 5
|
||||
amount, sysfee, err := bc.CalculateClaimable(util.Fixed8FromInt64(1), 1, 10)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 4+3+3+2, amount)
|
||||
require.EqualValues(t, 0, sysfee)
|
||||
})
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
|
@ -229,3 +260,109 @@ func TestClose(t *testing.T) {
|
|||
// This should never be executed.
|
||||
assert.Nil(t, t)
|
||||
}
|
||||
|
||||
func TestSubscriptions(t *testing.T) {
|
||||
// We use buffering here as a substitute for reader goroutines, events
|
||||
// get queued up and we read them one by one here.
|
||||
const chBufSize = 16
|
||||
blockCh := make(chan *block.Block, chBufSize)
|
||||
txCh := make(chan *transaction.Transaction, chBufSize)
|
||||
notificationCh := make(chan *state.NotificationEvent, chBufSize)
|
||||
executionCh := make(chan *state.AppExecResult, chBufSize)
|
||||
|
||||
bc := newTestChain(t)
|
||||
defer bc.Close()
|
||||
bc.SubscribeForBlocks(blockCh)
|
||||
bc.SubscribeForTransactions(txCh)
|
||||
bc.SubscribeForNotifications(notificationCh)
|
||||
bc.SubscribeForExecutions(executionCh)
|
||||
|
||||
assert.Empty(t, notificationCh)
|
||||
assert.Empty(t, executionCh)
|
||||
assert.Empty(t, blockCh)
|
||||
assert.Empty(t, txCh)
|
||||
|
||||
blocks, err := bc.genBlocks(1)
|
||||
require.NoError(t, err)
|
||||
assert.Eventually(t, func() bool { return len(blockCh) != 0 && len(txCh) != 0 }, time.Second, 10*time.Millisecond)
|
||||
assert.Empty(t, notificationCh)
|
||||
assert.Empty(t, executionCh)
|
||||
|
||||
b := <-blockCh
|
||||
tx := <-txCh
|
||||
assert.Equal(t, blocks[0], b)
|
||||
assert.Equal(t, blocks[0].Transactions[0], tx)
|
||||
assert.Empty(t, blockCh)
|
||||
assert.Empty(t, txCh)
|
||||
|
||||
acc0, err := wallet.NewAccountFromWIF(privNetKeys[0])
|
||||
require.NoError(t, err)
|
||||
addr0, err := address.StringToUint160(acc0.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
script := io.NewBufBinWriter()
|
||||
emit.Bytes(script.BinWriter, []byte("yay!"))
|
||||
emit.Syscall(script.BinWriter, "Neo.Runtime.Notify")
|
||||
require.NoError(t, script.Err)
|
||||
txGood1 := transaction.NewInvocationTX(script.Bytes(), 0)
|
||||
txGood1.AddVerificationHash(addr0)
|
||||
require.NoError(t, acc0.SignTx(txGood1))
|
||||
|
||||
// Reset() reuses the script buffer and we need to keep scripts.
|
||||
script = io.NewBufBinWriter()
|
||||
emit.Bytes(script.BinWriter, []byte("nay!"))
|
||||
emit.Syscall(script.BinWriter, "Neo.Runtime.Notify")
|
||||
emit.Opcode(script.BinWriter, opcode.THROW)
|
||||
require.NoError(t, script.Err)
|
||||
txBad := transaction.NewInvocationTX(script.Bytes(), 0)
|
||||
txBad.AddVerificationHash(addr0)
|
||||
require.NoError(t, acc0.SignTx(txBad))
|
||||
|
||||
script = io.NewBufBinWriter()
|
||||
emit.Bytes(script.BinWriter, []byte("yay! yay! yay!"))
|
||||
emit.Syscall(script.BinWriter, "Neo.Runtime.Notify")
|
||||
require.NoError(t, script.Err)
|
||||
txGood2 := transaction.NewInvocationTX(script.Bytes(), 0)
|
||||
txGood2.AddVerificationHash(addr0)
|
||||
require.NoError(t, acc0.SignTx(txGood2))
|
||||
|
||||
txMiner := newMinerTX()
|
||||
invBlock := newBlock(bc.config, bc.BlockHeight()+1, bc.CurrentHeaderHash(), txMiner, txGood1, txBad, txGood2)
|
||||
require.NoError(t, bc.AddBlock(invBlock))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return len(blockCh) != 0 && len(txCh) != 0 &&
|
||||
len(notificationCh) != 0 && len(executionCh) != 0
|
||||
}, time.Second, 10*time.Millisecond)
|
||||
|
||||
b = <-blockCh
|
||||
require.Equal(t, invBlock, b)
|
||||
assert.Empty(t, blockCh)
|
||||
|
||||
// Follow in-block transaction order.
|
||||
for _, txExpected := range invBlock.Transactions {
|
||||
tx = <-txCh
|
||||
require.Equal(t, txExpected, tx)
|
||||
if txExpected.Type == transaction.InvocationType {
|
||||
exec := <-executionCh
|
||||
require.Equal(t, tx.Hash(), exec.TxHash)
|
||||
if exec.VMState == "HALT" {
|
||||
notif := <-notificationCh
|
||||
inv := tx.Data.(*transaction.InvocationTX)
|
||||
require.Equal(t, hash.Hash160(inv.Script), notif.ScriptHash)
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Empty(t, txCh)
|
||||
assert.Empty(t, notificationCh)
|
||||
assert.Empty(t, executionCh)
|
||||
|
||||
bc.UnsubscribeFromBlocks(blockCh)
|
||||
bc.UnsubscribeFromTransactions(txCh)
|
||||
bc.UnsubscribeFromNotifications(notificationCh)
|
||||
bc.UnsubscribeFromExecutions(executionCh)
|
||||
|
||||
// Ensure that new blocks are processed correctly after unsubscription.
|
||||
_, err = bc.genBlocks(2 * chBufSize)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
|
@ -18,13 +18,15 @@ type Blockchainer interface {
|
|||
GetConfig() config.ProtocolConfiguration
|
||||
AddHeaders(...*block.Header) error
|
||||
AddBlock(*block.Block) error
|
||||
BlockHeight() uint32
|
||||
AddStateRoot(r *state.MPTRoot) error
|
||||
CalculateClaimable(value util.Fixed8, startHeight, endHeight uint32) (util.Fixed8, util.Fixed8, error)
|
||||
Close()
|
||||
HeaderHeight() uint32
|
||||
GetBlock(hash util.Uint256) (*block.Block, error)
|
||||
GetContractState(hash util.Uint160) *state.Contract
|
||||
GetEnrollments() ([]*state.Validator, error)
|
||||
ForEachNEP5Transfer(util.Uint160, *state.NEP5Transfer, func() (bool, error)) error
|
||||
ForEachTransfer(util.Uint160, *state.Transfer, func() (bool, error)) error
|
||||
GetHeaderHash(int) util.Uint256
|
||||
GetHeader(hash util.Uint256) (*block.Header, error)
|
||||
CurrentHeaderHash() util.Uint256
|
||||
|
@ -34,18 +36,30 @@ type Blockchainer interface {
|
|||
GetAssetState(util.Uint256) *state.Asset
|
||||
GetAccountState(util.Uint160) *state.Account
|
||||
GetAppExecResult(util.Uint256) (*state.AppExecResult, error)
|
||||
GetNEP5TransferLog(util.Uint160) *state.NEP5TransferLog
|
||||
GetNEP5Metadata(util.Uint160) (*state.NEP5Metadata, error)
|
||||
GetNEP5Balances(util.Uint160) *state.NEP5Balances
|
||||
GetValidators(txes ...*transaction.Transaction) ([]*keys.PublicKey, error)
|
||||
GetScriptHashesForVerifying(*transaction.Transaction) ([]util.Uint160, error)
|
||||
GetStateProof(root util.Uint256, key []byte) ([][]byte, error)
|
||||
GetStateRoot(height uint32) (*state.MPTRootState, error)
|
||||
GetStorageItem(scripthash util.Uint160, key []byte) *state.StorageItem
|
||||
GetStorageItems(hash util.Uint160) (map[string]*state.StorageItem, error)
|
||||
GetTestVM() *vm.VM
|
||||
GetSystemFeeAmount(h util.Uint256) uint32
|
||||
GetTestVM(tx *transaction.Transaction) *vm.VM
|
||||
GetTransaction(util.Uint256) (*transaction.Transaction, uint32, error)
|
||||
GetUnspentCoinState(util.Uint256) *state.UnspentCoin
|
||||
References(t *transaction.Transaction) ([]transaction.InOut, error)
|
||||
mempool.Feer // fee interface
|
||||
PoolTx(*transaction.Transaction) error
|
||||
StateHeight() uint32
|
||||
SubscribeForBlocks(ch chan<- *block.Block)
|
||||
SubscribeForExecutions(ch chan<- *state.AppExecResult)
|
||||
SubscribeForNotifications(ch chan<- *state.NotificationEvent)
|
||||
SubscribeForTransactions(ch chan<- *transaction.Transaction)
|
||||
VerifyTx(*transaction.Transaction, *block.Block) error
|
||||
GetMemPool() *mempool.Pool
|
||||
UnsubscribeFromBlocks(ch chan<- *block.Block)
|
||||
UnsubscribeFromExecutions(ch chan<- *state.AppExecResult)
|
||||
UnsubscribeFromNotifications(ch chan<- *state.NotificationEvent)
|
||||
UnsubscribeFromTransactions(ch chan<- *transaction.Transaction)
|
||||
}
|
||||
|
|
27
pkg/consensus/cache.go → pkg/core/cache/cache.go
vendored
27
pkg/consensus/cache.go → pkg/core/cache/cache.go
vendored
|
@ -1,4 +1,4 @@
|
|||
package consensus
|
||||
package cache
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
@ -7,9 +7,9 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// relayCache is a payload cache which is used to store
|
||||
// HashCache is a payload cache which is used to store
|
||||
// last consensus payloads.
|
||||
type relayCache struct {
|
||||
type HashCache struct {
|
||||
*sync.RWMutex
|
||||
|
||||
maxCap int
|
||||
|
@ -17,13 +17,14 @@ type relayCache struct {
|
|||
queue *list.List
|
||||
}
|
||||
|
||||
// hashable is a type of items which can be stored in the relayCache.
|
||||
type hashable interface {
|
||||
// Hashable is a type of items which can be stored in the HashCache.
|
||||
type Hashable interface {
|
||||
Hash() util.Uint256
|
||||
}
|
||||
|
||||
func newFIFOCache(capacity int) *relayCache {
|
||||
return &relayCache{
|
||||
// NewFIFOCache returns new FIFO cache with the specified capacity.
|
||||
func NewFIFOCache(capacity int) *HashCache {
|
||||
return &HashCache{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
|
||||
maxCap: capacity,
|
||||
|
@ -33,7 +34,7 @@ func newFIFOCache(capacity int) *relayCache {
|
|||
}
|
||||
|
||||
// Add adds payload into a cache if it doesn't already exist.
|
||||
func (c *relayCache) Add(p hashable) {
|
||||
func (c *HashCache) Add(p Hashable) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
|
@ -45,7 +46,7 @@ func (c *relayCache) Add(p hashable) {
|
|||
if c.queue.Len() >= c.maxCap {
|
||||
first := c.queue.Front()
|
||||
c.queue.Remove(first)
|
||||
delete(c.elems, first.Value.(hashable).Hash())
|
||||
delete(c.elems, first.Value.(Hashable).Hash())
|
||||
}
|
||||
|
||||
e := c.queue.PushBack(p)
|
||||
|
@ -53,7 +54,7 @@ func (c *relayCache) Add(p hashable) {
|
|||
}
|
||||
|
||||
// Has checks if an item is already in cache.
|
||||
func (c *relayCache) Has(h util.Uint256) bool {
|
||||
func (c *HashCache) Has(h util.Uint256) bool {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
|
@ -61,13 +62,13 @@ func (c *relayCache) Has(h util.Uint256) bool {
|
|||
}
|
||||
|
||||
// Get returns payload with the specified hash from cache.
|
||||
func (c *relayCache) Get(h util.Uint256) hashable {
|
||||
func (c *HashCache) Get(h util.Uint256) Hashable {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
e, ok := c.elems[h]
|
||||
if !ok {
|
||||
return hashable(nil)
|
||||
return Hashable(nil)
|
||||
}
|
||||
return e.Value.(hashable)
|
||||
return e.Value.(Hashable)
|
||||
}
|
|
@ -1,17 +1,17 @@
|
|||
package consensus
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/dbft/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/random"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRelayCache_Add(t *testing.T) {
|
||||
const capacity = 3
|
||||
payloads := getDifferentPayloads(t, capacity+1)
|
||||
c := newFIFOCache(capacity)
|
||||
payloads := getDifferentItems(t, capacity+1)
|
||||
c := NewFIFOCache(capacity)
|
||||
require.Equal(t, 0, c.queue.Len())
|
||||
require.Equal(t, 0, len(c.elems))
|
||||
|
||||
|
@ -46,18 +46,15 @@ func TestRelayCache_Add(t *testing.T) {
|
|||
require.Equal(t, nil, c.Get(payloads[1].Hash()))
|
||||
}
|
||||
|
||||
func getDifferentPayloads(t *testing.T, n int) (payloads []Payload) {
|
||||
payloads = make([]Payload, n)
|
||||
for i := range payloads {
|
||||
var sign [signatureSize]byte
|
||||
random.Fill(sign[:])
|
||||
type testHashable []byte
|
||||
|
||||
payloads[i].SetValidatorIndex(uint16(i))
|
||||
payloads[i].SetType(payload.MessageType(commitType))
|
||||
payloads[i].payload = &commit{
|
||||
signature: sign,
|
||||
}
|
||||
// Hash implements Hashable.
|
||||
func (h testHashable) Hash() util.Uint256 { return hash.Sha256(h) }
|
||||
|
||||
func getDifferentItems(t *testing.T, n int) []testHashable {
|
||||
items := make([]testHashable, n)
|
||||
for i := range items {
|
||||
items[i] = []byte{byte(i)}
|
||||
}
|
||||
|
||||
return
|
||||
return items
|
||||
}
|
|
@ -1,9 +1,12 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
@ -13,11 +16,16 @@ import (
|
|||
// objects in the storeBlock().
|
||||
type Cached struct {
|
||||
DAO
|
||||
accounts map[util.Uint160]*state.Account
|
||||
contracts map[util.Uint160]*state.Contract
|
||||
unspents map[util.Uint256]*state.UnspentCoin
|
||||
balances map[util.Uint160]*state.NEP5Balances
|
||||
transfers map[util.Uint160]map[uint32]*state.NEP5TransferLog
|
||||
accounts map[util.Uint160]*state.Account
|
||||
contracts map[util.Uint160]*state.Contract
|
||||
unspents map[util.Uint256]*state.UnspentCoin
|
||||
balances map[util.Uint160]*state.NEP5Balances
|
||||
nep5transfers map[util.Uint160]map[uint32]*state.TransferLog
|
||||
transfers map[util.Uint160]map[uint32]*state.TransferLog
|
||||
nextBatch map[util.Uint160]uint32
|
||||
storage *itemCache
|
||||
|
||||
dropNEP5Cache bool
|
||||
}
|
||||
|
||||
// NewCached returns new Cached wrapping around given backing store.
|
||||
|
@ -26,8 +34,30 @@ func NewCached(d DAO) *Cached {
|
|||
ctrs := make(map[util.Uint160]*state.Contract)
|
||||
unspents := make(map[util.Uint256]*state.UnspentCoin)
|
||||
balances := make(map[util.Uint160]*state.NEP5Balances)
|
||||
transfers := make(map[util.Uint160]map[uint32]*state.NEP5TransferLog)
|
||||
return &Cached{d.GetWrapped(), accs, ctrs, unspents, balances, transfers}
|
||||
nep5transfers := make(map[util.Uint160]map[uint32]*state.TransferLog)
|
||||
transfers := make(map[util.Uint160]map[uint32]*state.TransferLog)
|
||||
nextBatch := make(map[util.Uint160]uint32)
|
||||
st := newItemCache()
|
||||
dao := d.GetWrapped()
|
||||
if cd, ok := dao.(*Cached); ok {
|
||||
for h, m := range cd.storage.st {
|
||||
for _, k := range cd.storage.keys[h] {
|
||||
st.put(h, []byte(k), m[k].State, copyItem(&m[k].StorageItem))
|
||||
}
|
||||
}
|
||||
}
|
||||
return &Cached{
|
||||
DAO: dao,
|
||||
accounts: accs,
|
||||
contracts: ctrs,
|
||||
unspents: unspents,
|
||||
balances: balances,
|
||||
nep5transfers: nep5transfers,
|
||||
transfers: transfers,
|
||||
nextBatch: nextBatch,
|
||||
storage: st,
|
||||
dropNEP5Cache: false,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccountStateOrNew retrieves Account from cache or underlying store
|
||||
|
@ -91,6 +121,52 @@ func (cd *Cached) PutUnspentCoinState(hash util.Uint256, ucs *state.UnspentCoin)
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNextTransferBatch returns index for the transfer batch to write to.
|
||||
func (cd *Cached) GetNextTransferBatch(acc util.Uint160) (uint32, error) {
|
||||
if n, ok := cd.nextBatch[acc]; ok {
|
||||
return n, nil
|
||||
}
|
||||
return cd.DAO.GetNextTransferBatch(acc)
|
||||
}
|
||||
|
||||
// PutNextTransferBatch sets index of the transfer batch to write to.
|
||||
func (cd *Cached) PutNextTransferBatch(acc util.Uint160, num uint32) error {
|
||||
cd.nextBatch[acc] = num
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTransferLog retrieves TransferLog for the acc.
|
||||
func (cd *Cached) GetTransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error) {
|
||||
ts := cd.transfers[acc]
|
||||
if ts != nil && ts[index] != nil {
|
||||
return ts[index], nil
|
||||
}
|
||||
return cd.DAO.GetTransferLog(acc, index)
|
||||
}
|
||||
|
||||
// PutTransferLog saves TransferLog for the acc.
|
||||
func (cd *Cached) PutTransferLog(acc util.Uint160, index uint32, bs *state.TransferLog) error {
|
||||
ts := cd.transfers[acc]
|
||||
if ts == nil {
|
||||
ts = make(map[uint32]*state.TransferLog, 2)
|
||||
cd.transfers[acc] = ts
|
||||
}
|
||||
ts[index] = bs
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendTransfer appends new transfer to a transfer event log.
|
||||
func (cd *Cached) AppendTransfer(acc util.Uint160, index uint32, tr *state.Transfer) (bool, error) {
|
||||
lg, err := cd.GetTransferLog(acc, index)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := lg.Append(tr); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return lg.Size() >= transferBatchSize, cd.PutTransferLog(acc, index, lg)
|
||||
}
|
||||
|
||||
// GetNEP5Balances retrieves NEP5Balances for the acc.
|
||||
func (cd *Cached) GetNEP5Balances(acc util.Uint160) (*state.NEP5Balances, error) {
|
||||
if bs := cd.balances[acc]; bs != nil {
|
||||
|
@ -105,21 +181,21 @@ func (cd *Cached) PutNEP5Balances(acc util.Uint160, bs *state.NEP5Balances) erro
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNEP5TransferLog retrieves NEP5TransferLog for the acc.
|
||||
func (cd *Cached) GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.NEP5TransferLog, error) {
|
||||
ts := cd.transfers[acc]
|
||||
// GetNEP5TransferLog retrieves TransferLog for the acc.
|
||||
func (cd *Cached) GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error) {
|
||||
ts := cd.nep5transfers[acc]
|
||||
if ts != nil && ts[index] != nil {
|
||||
return ts[index], nil
|
||||
}
|
||||
return cd.DAO.GetNEP5TransferLog(acc, index)
|
||||
}
|
||||
|
||||
// PutNEP5TransferLog saves NEP5TransferLog for the acc.
|
||||
func (cd *Cached) PutNEP5TransferLog(acc util.Uint160, index uint32, bs *state.NEP5TransferLog) error {
|
||||
ts := cd.transfers[acc]
|
||||
// PutNEP5TransferLog saves TransferLog for the acc.
|
||||
func (cd *Cached) PutNEP5TransferLog(acc util.Uint160, index uint32, bs *state.TransferLog) error {
|
||||
ts := cd.nep5transfers[acc]
|
||||
if ts == nil {
|
||||
ts = make(map[uint32]*state.NEP5TransferLog, 2)
|
||||
cd.transfers[acc] = ts
|
||||
ts = make(map[uint32]*state.TransferLog, 2)
|
||||
cd.nep5transfers[acc] = ts
|
||||
}
|
||||
ts[index] = bs
|
||||
return nil
|
||||
|
@ -137,17 +213,86 @@ func (cd *Cached) AppendNEP5Transfer(acc util.Uint160, index uint32, tr *state.N
|
|||
return lg.Size() >= nep5TransferBatchSize, cd.PutNEP5TransferLog(acc, index, lg)
|
||||
}
|
||||
|
||||
// MigrateNEP5Balances migrates NEP5 balances from old contract to the new one.
|
||||
func (cd *Cached) MigrateNEP5Balances(from, to util.Uint160) error {
|
||||
var (
|
||||
simpleDAO *Simple
|
||||
cachedDAO = cd
|
||||
ok bool
|
||||
w = io.NewBufBinWriter()
|
||||
)
|
||||
for simpleDAO == nil {
|
||||
simpleDAO, ok = cachedDAO.DAO.(*Simple)
|
||||
if !ok {
|
||||
cachedDAO, ok = cachedDAO.DAO.(*Cached)
|
||||
if !ok {
|
||||
panic("uknown DAO")
|
||||
}
|
||||
}
|
||||
}
|
||||
for acc, bs := range cd.balances {
|
||||
err := simpleDAO.putNEP5Balances(acc, bs, w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Reset()
|
||||
}
|
||||
cd.dropNEP5Cache = true
|
||||
var store = simpleDAO.Store
|
||||
// Create another layer of cache because we can't change original storage
|
||||
// while seeking.
|
||||
var upStore = storage.NewMemCachedStore(store)
|
||||
store.Seek([]byte{byte(storage.STNEP5Balances)}, func(k, v []byte) {
|
||||
if !bytes.Contains(v, from[:]) {
|
||||
return
|
||||
}
|
||||
bs := state.NewNEP5Balances()
|
||||
reader := io.NewBinReaderFromBuf(v)
|
||||
bs.DecodeBinary(reader)
|
||||
if reader.Err != nil {
|
||||
panic("bad nep5 balances")
|
||||
}
|
||||
tr, ok := bs.Trackers[from]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(bs.Trackers, from)
|
||||
bs.Trackers[to] = tr
|
||||
w.Reset()
|
||||
bs.EncodeBinary(w.BinWriter)
|
||||
if w.Err != nil {
|
||||
panic("error on nep5 balance encoding")
|
||||
}
|
||||
err := upStore.Put(k, w.Bytes())
|
||||
if err != nil {
|
||||
panic("can't put value in the DB")
|
||||
}
|
||||
})
|
||||
_, err := upStore.Persist()
|
||||
return err
|
||||
}
|
||||
|
||||
// Persist flushes all the changes made into the (supposedly) persistent
|
||||
// underlying store.
|
||||
func (cd *Cached) Persist() (int, error) {
|
||||
if err := cd.FlushStorage(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
lowerCache, ok := cd.DAO.(*Cached)
|
||||
// If the lower DAO is Cached, we only need to flush the MemCached DB.
|
||||
// This actually breaks DAO interface incapsulation, but for our current
|
||||
// usage scenario it should be good enough if cd doesn't modify object
|
||||
// caches (accounts/contracts/etc) in any way.
|
||||
if ok {
|
||||
if cd.dropNEP5Cache {
|
||||
lowerCache.balances = make(map[util.Uint160]*state.NEP5Balances)
|
||||
}
|
||||
var simpleCache *Simple
|
||||
for simpleCache == nil {
|
||||
if err := lowerCache.FlushStorage(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
simpleCache, ok = lowerCache.DAO.(*Simple)
|
||||
if !ok {
|
||||
lowerCache, ok = cd.DAO.(*Cached)
|
||||
|
@ -181,7 +326,7 @@ func (cd *Cached) Persist() (int, error) {
|
|||
}
|
||||
buf.Reset()
|
||||
}
|
||||
for acc, ts := range cd.transfers {
|
||||
for acc, ts := range cd.nep5transfers {
|
||||
for ind, lg := range ts {
|
||||
err := cd.DAO.PutNEP5TransferLog(acc, ind, lg)
|
||||
if err != nil {
|
||||
|
@ -189,6 +334,20 @@ func (cd *Cached) Persist() (int, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
for acc, ts := range cd.transfers {
|
||||
for ind, lg := range ts {
|
||||
err := cd.DAO.PutTransferLog(acc, ind, lg)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
for acc, nb := range cd.nextBatch {
|
||||
err := cd.DAO.PutNextTransferBatch(acc, nb)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return cd.DAO.Persist()
|
||||
}
|
||||
|
||||
|
@ -199,6 +358,210 @@ func (cd *Cached) GetWrapped() DAO {
|
|||
cd.contracts,
|
||||
cd.unspents,
|
||||
cd.balances,
|
||||
cd.nep5transfers,
|
||||
cd.transfers,
|
||||
cd.nextBatch,
|
||||
cd.storage,
|
||||
false,
|
||||
}
|
||||
}
|
||||
|
||||
// FlushStorage flushes storage changes to the underlying DAO.
|
||||
func (cd *Cached) FlushStorage() error {
|
||||
if d, ok := cd.DAO.(*Cached); ok {
|
||||
d.storage.st = cd.storage.st
|
||||
d.storage.keys = cd.storage.keys
|
||||
return nil
|
||||
}
|
||||
for h, items := range cd.storage.st {
|
||||
for _, k := range cd.storage.keys[h] {
|
||||
ti := items[k]
|
||||
switch ti.State {
|
||||
case putOp, addOp:
|
||||
err := cd.DAO.PutStorageItem(h, []byte(k), &ti.StorageItem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case delOp:
|
||||
err := cd.DAO.DeleteStorageItem(h, []byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ti.State |= flushedState
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyItem(si *state.StorageItem) *state.StorageItem {
|
||||
val := make([]byte, len(si.Value))
|
||||
copy(val, si.Value)
|
||||
return &state.StorageItem{
|
||||
Value: val,
|
||||
IsConst: si.IsConst,
|
||||
}
|
||||
}
|
||||
|
||||
// GetStorageItem returns StorageItem if it exists in the given store.
|
||||
func (cd *Cached) GetStorageItem(scripthash util.Uint160, key []byte) *state.StorageItem {
|
||||
return cd.getStorageItemInt(scripthash, key, true)
|
||||
}
|
||||
|
||||
// getStorageItemNoCache is non-caching GetStorageItem version.
|
||||
func (cd *Cached) getStorageItemNoCache(scripthash util.Uint160, key []byte) *state.StorageItem {
|
||||
return cd.getStorageItemInt(scripthash, key, false)
|
||||
}
|
||||
|
||||
// getStorageItemInt is an internal GetStorageItem that can either cache read
|
||||
// (for upper Cached) or not do so (for lower Cached that should only be updated
|
||||
// on persist).
|
||||
func (cd *Cached) getStorageItemInt(scripthash util.Uint160, key []byte, putToCache bool) *state.StorageItem {
|
||||
ti := cd.storage.getItem(scripthash, key)
|
||||
if ti != nil {
|
||||
if ti.State&delOp != 0 {
|
||||
return nil
|
||||
}
|
||||
return copyItem(&ti.StorageItem)
|
||||
}
|
||||
|
||||
// Gets shouldn't affect lower Cached.storage until Persist.
|
||||
var si *state.StorageItem
|
||||
if lowerCached, ok := cd.DAO.(*Cached); ok {
|
||||
si = lowerCached.getStorageItemNoCache(scripthash, key)
|
||||
} else {
|
||||
si = cd.DAO.GetStorageItem(scripthash, key)
|
||||
}
|
||||
if si != nil {
|
||||
if putToCache {
|
||||
cd.storage.put(scripthash, key, getOp, si)
|
||||
}
|
||||
return copyItem(si)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutStorageItem puts given StorageItem for given script with given
|
||||
// key into the given store.
|
||||
func (cd *Cached) PutStorageItem(scripthash util.Uint160, key []byte, si *state.StorageItem) error {
|
||||
item := copyItem(si)
|
||||
ti := cd.storage.getItem(scripthash, key)
|
||||
if ti != nil {
|
||||
if ti.State&(delOp|getOp) != 0 {
|
||||
ti.State = putOp
|
||||
} else {
|
||||
ti.State = addOp
|
||||
}
|
||||
ti.StorageItem = *item
|
||||
return nil
|
||||
}
|
||||
|
||||
op := addOp
|
||||
if it := cd.DAO.GetStorageItem(scripthash, key); it != nil {
|
||||
op = putOp
|
||||
}
|
||||
cd.storage.put(scripthash, key, op, item)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteStorageItem drops storage item for the given script with the
|
||||
// given key from the store.
|
||||
func (cd *Cached) DeleteStorageItem(scripthash util.Uint160, key []byte) error {
|
||||
ti := cd.storage.getItem(scripthash, key)
|
||||
if ti != nil {
|
||||
ti.State = delOp
|
||||
ti.Value = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
it := cd.DAO.GetStorageItem(scripthash, key)
|
||||
if it != nil {
|
||||
cd.storage.put(scripthash, key, delOp, it)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StorageIteratorFunc is a function returning key-value pair or error.
|
||||
type StorageIteratorFunc func() ([]byte, []byte, error)
|
||||
|
||||
// GetStorageItemsIterator returns iterator over all storage items.
|
||||
// Function returned can be called until first error.
|
||||
func (cd *Cached) GetStorageItemsIterator(hash util.Uint160, prefix []byte) (StorageIteratorFunc, error) {
|
||||
items, err := cd.DAO.GetStorageItems(hash, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Slice(items, func(i, j int) bool { return bytes.Compare(items[i].Key, items[j].Key) == -1 })
|
||||
|
||||
cache := cd.storage.getItems(hash)
|
||||
|
||||
var getItemFromCache StorageIteratorFunc
|
||||
keyIndex := -1
|
||||
getItemFromCache = func() ([]byte, []byte, error) {
|
||||
keyIndex++
|
||||
for ; keyIndex < len(cd.storage.keys[hash]); keyIndex++ {
|
||||
k := cd.storage.keys[hash][keyIndex]
|
||||
v := cache[k]
|
||||
if v.State&delOp == 0 && bytes.HasPrefix([]byte(k), prefix) {
|
||||
val := make([]byte, len(v.StorageItem.Value))
|
||||
copy(val, v.StorageItem.Value)
|
||||
return []byte(k), val, nil
|
||||
}
|
||||
}
|
||||
return nil, nil, errors.New("no more items")
|
||||
}
|
||||
|
||||
var f func() ([]byte, []byte, error)
|
||||
index := -1
|
||||
f = func() ([]byte, []byte, error) {
|
||||
index++
|
||||
for ; index < len(items); index++ {
|
||||
_, ok := cache[string(items[index].Key)]
|
||||
if !ok {
|
||||
return items[index].Key, items[index].Value, nil
|
||||
}
|
||||
}
|
||||
return getItemFromCache()
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// GetStorageItems returns all storage items for a given scripthash.
|
||||
func (cd *Cached) GetStorageItems(hash util.Uint160, prefix []byte) ([]StorageItemWithKey, error) {
|
||||
items, err := cd.DAO.GetStorageItems(hash, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cache := cd.storage.getItems(hash)
|
||||
if len(cache) == 0 {
|
||||
return items, nil
|
||||
}
|
||||
|
||||
result := make([]StorageItemWithKey, 0, len(items))
|
||||
for i := range items {
|
||||
_, ok := cache[string(items[i].Key)]
|
||||
if !ok {
|
||||
result = append(result, items[i])
|
||||
}
|
||||
}
|
||||
sort.Slice(result, func(i, j int) bool { return bytes.Compare(result[i].Key, result[j].Key) == -1 })
|
||||
|
||||
for _, k := range cd.storage.keys[hash] {
|
||||
v := cache[k]
|
||||
if v.State&delOp == 0 {
|
||||
val := make([]byte, len(v.StorageItem.Value))
|
||||
copy(val, v.StorageItem.Value)
|
||||
result = append(result, StorageItemWithKey{
|
||||
StorageItem: state.StorageItem{
|
||||
Value: val,
|
||||
IsConst: v.StorageItem.IsConst,
|
||||
},
|
||||
Key: []byte(k),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"sort"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
|
@ -18,6 +19,7 @@ import (
|
|||
// DAO is a data access object.
|
||||
type DAO interface {
|
||||
AppendNEP5Transfer(acc util.Uint160, index uint32, tr *state.NEP5Transfer) (bool, error)
|
||||
AppendTransfer(acc util.Uint160, index uint32, tr *state.Transfer) (bool, error)
|
||||
DeleteContractState(hash util.Uint160) error
|
||||
DeleteStorageItem(scripthash util.Uint160, key []byte) error
|
||||
DeleteValidatorState(vs *state.Validator) error
|
||||
|
@ -31,12 +33,18 @@ type DAO interface {
|
|||
GetContractState(hash util.Uint160) (*state.Contract, error)
|
||||
GetCurrentBlockHeight() (uint32, error)
|
||||
GetCurrentHeaderHeight() (i uint32, h util.Uint256, err error)
|
||||
GetCurrentStateRootHeight() (uint32, error)
|
||||
GetHeaderHashes() ([]util.Uint256, error)
|
||||
GetNEP5Balances(acc util.Uint160) (*state.NEP5Balances, error)
|
||||
GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.NEP5TransferLog, error)
|
||||
GetNEP5Metadata(h util.Uint160) (*state.NEP5Metadata, error)
|
||||
GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error)
|
||||
GetNextTransferBatch(acc util.Uint160) (uint32, error)
|
||||
GetStateRoot(height uint32) (*state.MPTRootState, error)
|
||||
PutStateRoot(root *state.MPTRootState) error
|
||||
GetStorageItem(scripthash util.Uint160, key []byte) *state.StorageItem
|
||||
GetStorageItems(hash util.Uint160) (map[string]*state.StorageItem, error)
|
||||
GetStorageItems(hash util.Uint160, prefix []byte) ([]StorageItemWithKey, error)
|
||||
GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error)
|
||||
GetTransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error)
|
||||
GetUnspentCoinState(hash util.Uint256) (*state.UnspentCoin, error)
|
||||
GetValidatorState(publicKey *keys.PublicKey) (*state.Validator, error)
|
||||
GetValidatorStateOrNew(publicKey *keys.PublicKey) (*state.Validator, error)
|
||||
|
@ -54,8 +62,11 @@ type DAO interface {
|
|||
PutContractState(cs *state.Contract) error
|
||||
PutCurrentHeader(hashAndIndex []byte) error
|
||||
PutNEP5Balances(acc util.Uint160, bs *state.NEP5Balances) error
|
||||
PutNEP5TransferLog(acc util.Uint160, index uint32, lg *state.NEP5TransferLog) error
|
||||
PutNEP5Metadata(h util.Uint160, meta *state.NEP5Metadata) error
|
||||
PutNEP5TransferLog(acc util.Uint160, index uint32, lg *state.TransferLog) error
|
||||
PutNextTransferBatch(acc util.Uint160, num uint32) error
|
||||
PutStorageItem(scripthash util.Uint160, key []byte, si *state.StorageItem) error
|
||||
PutTransferLog(acc util.Uint160, index uint32, lg *state.TransferLog) error
|
||||
PutUnspentCoinState(hash util.Uint256, ucs *state.UnspentCoin) error
|
||||
PutValidatorState(vs *state.Validator) error
|
||||
PutValidatorsCount(vc *state.ValidatorsCount) error
|
||||
|
@ -70,12 +81,14 @@ type DAO interface {
|
|||
|
||||
// Simple is memCached wrapper around DB, simple DAO implementation.
|
||||
type Simple struct {
|
||||
MPT *mpt.Trie
|
||||
Store *storage.MemCachedStore
|
||||
}
|
||||
|
||||
// NewSimple creates new simple dao using provided backend store.
|
||||
func NewSimple(backend storage.Store) *Simple {
|
||||
return &Simple{Store: storage.NewMemCachedStore(backend)}
|
||||
st := storage.NewMemCachedStore(backend)
|
||||
return &Simple{Store: st}
|
||||
}
|
||||
|
||||
// GetBatch returns currently accumulated DB changeset.
|
||||
|
@ -86,7 +99,9 @@ func (dao *Simple) GetBatch() *storage.MemBatch {
|
|||
// GetWrapped returns new DAO instance with another layer of wrapped
|
||||
// MemCachedStore around the current DAO Store.
|
||||
func (dao *Simple) GetWrapped() DAO {
|
||||
return NewSimple(dao.Store)
|
||||
d := NewSimple(dao.Store)
|
||||
d.MPT = dao.MPT
|
||||
return d
|
||||
}
|
||||
|
||||
// GetAndDecode performs get operation and decoding with serializable structures.
|
||||
|
@ -207,6 +222,22 @@ func (dao *Simple) DeleteContractState(hash util.Uint160) error {
|
|||
return dao.Store.Delete(key)
|
||||
}
|
||||
|
||||
// GetNEP5Metadata returns saved NEP5 metadata for the contract h.
|
||||
func (dao *Simple) GetNEP5Metadata(h util.Uint160) (*state.NEP5Metadata, error) {
|
||||
key := storage.AppendPrefix(storage.STMigration, h.BytesBE())
|
||||
m := new(state.NEP5Metadata)
|
||||
if err := dao.GetAndDecode(m, key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// PutNEP5Metadata saves NEP5 metadata for the contract h.
|
||||
func (dao *Simple) PutNEP5Metadata(h util.Uint160, m *state.NEP5Metadata) error {
|
||||
key := storage.AppendPrefix(storage.STMigration, h.BytesBE())
|
||||
return dao.Put(m, key)
|
||||
}
|
||||
|
||||
// -- end contracts.
|
||||
|
||||
// -- start nep5 balances.
|
||||
|
@ -236,7 +267,16 @@ func (dao *Simple) putNEP5Balances(acc util.Uint160, bs *state.NEP5Balances, buf
|
|||
|
||||
// -- start transfer log.
|
||||
|
||||
const nep5TransferBatchSize = 128
|
||||
const nep5TransferBatchSize = 128 * state.NEP5TransferSize
|
||||
const transferBatchSize = 128 * state.TransferSize
|
||||
|
||||
func getTransferLogKey(acc util.Uint160, index uint32) []byte {
|
||||
key := make([]byte, 1+util.Uint160Size+4)
|
||||
key[0] = byte(storage.STTransfers)
|
||||
copy(key[1:], acc.BytesBE())
|
||||
binary.LittleEndian.PutUint32(key[util.Uint160Size:], index)
|
||||
return key
|
||||
}
|
||||
|
||||
func getNEP5TransferLogKey(acc util.Uint160, index uint32) []byte {
|
||||
key := make([]byte, 1+util.Uint160Size+4)
|
||||
|
@ -246,21 +286,77 @@ func getNEP5TransferLogKey(acc util.Uint160, index uint32) []byte {
|
|||
return key
|
||||
}
|
||||
|
||||
// GetNextTransferBatch returns index for the transfer batch to write to.
|
||||
func (dao *Simple) GetNextTransferBatch(acc util.Uint160) (uint32, error) {
|
||||
key := storage.AppendPrefix(storage.STTransfers, acc.BytesBE())
|
||||
val, err := dao.Store.Get(key)
|
||||
if err != nil {
|
||||
if err != storage.ErrKeyNotFound {
|
||||
return 0, err
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
return binary.LittleEndian.Uint32(val), nil
|
||||
}
|
||||
|
||||
// PutNextTransferBatch sets index of the transfer batch to write to.
|
||||
func (dao *Simple) PutNextTransferBatch(acc util.Uint160, num uint32) error {
|
||||
key := storage.AppendPrefix(storage.STTransfers, acc.BytesBE())
|
||||
val := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(val, num)
|
||||
return dao.Store.Put(key, val)
|
||||
}
|
||||
|
||||
// GetTransferLog retrieves transfer log from the cache.
|
||||
func (dao *Simple) GetTransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error) {
|
||||
key := getTransferLogKey(acc, index)
|
||||
value, err := dao.Store.Get(key)
|
||||
if err != nil {
|
||||
if err == storage.ErrKeyNotFound {
|
||||
return new(state.TransferLog), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &state.TransferLog{Raw: value}, nil
|
||||
}
|
||||
|
||||
// PutTransferLog saves given transfer log in the cache.
|
||||
func (dao *Simple) PutTransferLog(acc util.Uint160, index uint32, lg *state.TransferLog) error {
|
||||
key := getTransferLogKey(acc, index)
|
||||
return dao.Store.Put(key, lg.Raw)
|
||||
}
|
||||
|
||||
// AppendTransfer appends a single transfer to a log.
|
||||
// First return value signalizes that log size has exceeded batch size.
|
||||
func (dao *Simple) AppendTransfer(acc util.Uint160, index uint32, tr *state.Transfer) (bool, error) {
|
||||
lg, err := dao.GetTransferLog(acc, index)
|
||||
if err != nil {
|
||||
if err != storage.ErrKeyNotFound {
|
||||
return false, err
|
||||
}
|
||||
lg = new(state.TransferLog)
|
||||
}
|
||||
if err := lg.Append(tr); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return lg.Size() >= transferBatchSize, dao.PutTransferLog(acc, index, lg)
|
||||
}
|
||||
|
||||
// GetNEP5TransferLog retrieves transfer log from the cache.
|
||||
func (dao *Simple) GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.NEP5TransferLog, error) {
|
||||
func (dao *Simple) GetNEP5TransferLog(acc util.Uint160, index uint32) (*state.TransferLog, error) {
|
||||
key := getNEP5TransferLogKey(acc, index)
|
||||
value, err := dao.Store.Get(key)
|
||||
if err != nil {
|
||||
if err == storage.ErrKeyNotFound {
|
||||
return new(state.NEP5TransferLog), nil
|
||||
return new(state.TransferLog), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &state.NEP5TransferLog{Raw: value}, nil
|
||||
return &state.TransferLog{Raw: value}, nil
|
||||
}
|
||||
|
||||
// PutNEP5TransferLog saves given transfer log in the cache.
|
||||
func (dao *Simple) PutNEP5TransferLog(acc util.Uint160, index uint32, lg *state.NEP5TransferLog) error {
|
||||
func (dao *Simple) PutNEP5TransferLog(acc util.Uint160, index uint32, lg *state.TransferLog) error {
|
||||
key := getNEP5TransferLogKey(acc, index)
|
||||
return dao.Store.Put(key, lg.Raw)
|
||||
}
|
||||
|
@ -273,7 +369,7 @@ func (dao *Simple) AppendNEP5Transfer(acc util.Uint160, index uint32, tr *state.
|
|||
if err != storage.ErrKeyNotFound {
|
||||
return false, err
|
||||
}
|
||||
lg = new(state.NEP5TransferLog)
|
||||
lg = new(state.TransferLog)
|
||||
}
|
||||
if err := lg.Append(tr); err != nil {
|
||||
return false, err
|
||||
|
@ -406,6 +502,79 @@ func (dao *Simple) PutAppExecResult(aer *state.AppExecResult) error {
|
|||
|
||||
// -- start storage item.
|
||||
|
||||
func makeStateRootKey(height uint32) []byte {
|
||||
key := make([]byte, 5)
|
||||
key[0] = byte(storage.DataMPT)
|
||||
binary.LittleEndian.PutUint32(key[1:], height)
|
||||
return key
|
||||
}
|
||||
|
||||
// InitMPT initializes MPT at the given height.
|
||||
func (dao *Simple) InitMPT(height uint32, enableRefCount bool) error {
|
||||
var gcKey = []byte{byte(storage.DataMPT), 1}
|
||||
if height == 0 {
|
||||
dao.MPT = mpt.NewTrie(nil, enableRefCount, dao.Store)
|
||||
var val byte
|
||||
if enableRefCount {
|
||||
val = 1
|
||||
}
|
||||
return dao.Store.Put(gcKey, []byte{val})
|
||||
}
|
||||
var hasRefCount bool
|
||||
if v, err := dao.Store.Get(gcKey); err == nil {
|
||||
hasRefCount = v[0] != 0
|
||||
}
|
||||
if hasRefCount != enableRefCount {
|
||||
return fmt.Errorf("KeepOnlyLatestState setting mismatch: old=%v, new=%v", hasRefCount, enableRefCount)
|
||||
}
|
||||
r, err := dao.GetStateRoot(height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var rootnode mpt.Node
|
||||
if !r.Root.Equals(util.Uint256{}) { // some initial blocks can have root == 0 and it's not a valid root
|
||||
rootnode = mpt.NewHashNode(r.Root)
|
||||
}
|
||||
dao.MPT = mpt.NewTrie(rootnode, enableRefCount, dao.Store)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetCurrentStateRootHeight returns current state root height.
|
||||
func (dao *Simple) GetCurrentStateRootHeight() (uint32, error) {
|
||||
key := []byte{byte(storage.DataMPT)}
|
||||
val, err := dao.Store.Get(key)
|
||||
if err != nil {
|
||||
if err == storage.ErrKeyNotFound {
|
||||
err = nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return binary.LittleEndian.Uint32(val), nil
|
||||
}
|
||||
|
||||
// PutCurrentStateRootHeight updates current state root height.
|
||||
func (dao *Simple) PutCurrentStateRootHeight(height uint32) error {
|
||||
key := []byte{byte(storage.DataMPT)}
|
||||
val := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(val, height)
|
||||
return dao.Store.Put(key, val)
|
||||
}
|
||||
|
||||
// GetStateRoot returns state root of a given height.
|
||||
func (dao *Simple) GetStateRoot(height uint32) (*state.MPTRootState, error) {
|
||||
r := new(state.MPTRootState)
|
||||
err := dao.GetAndDecode(r, makeStateRootKey(height))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// PutStateRoot puts state root of a given height into the store.
|
||||
func (dao *Simple) PutStateRoot(r *state.MPTRootState) error {
|
||||
return dao.Put(r, makeStateRootKey(r.Index))
|
||||
}
|
||||
|
||||
// GetStorageItem returns StorageItem if it exists in the given store.
|
||||
func (dao *Simple) GetStorageItem(scripthash util.Uint160, key []byte) *state.StorageItem {
|
||||
b, err := dao.Store.Get(makeStorageItemKey(scripthash, key))
|
||||
|
@ -426,18 +595,39 @@ func (dao *Simple) GetStorageItem(scripthash util.Uint160, key []byte) *state.St
|
|||
// PutStorageItem puts given StorageItem for given script with given
|
||||
// key into the given store.
|
||||
func (dao *Simple) PutStorageItem(scripthash util.Uint160, key []byte, si *state.StorageItem) error {
|
||||
return dao.Put(si, makeStorageItemKey(scripthash, key))
|
||||
stKey := makeStorageItemKey(scripthash, key)
|
||||
v := mpt.ToNeoStorageValue(si)
|
||||
if dao.MPT != nil {
|
||||
k := mpt.ToNeoStorageKey(stKey[1:]) // strip STStorage prefix
|
||||
if err := dao.MPT.Put(k, v); err != nil && err != mpt.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return dao.Store.Put(stKey, v[1:])
|
||||
}
|
||||
|
||||
// DeleteStorageItem drops storage item for the given script with the
|
||||
// given key from the store.
|
||||
func (dao *Simple) DeleteStorageItem(scripthash util.Uint160, key []byte) error {
|
||||
return dao.Store.Delete(makeStorageItemKey(scripthash, key))
|
||||
stKey := makeStorageItemKey(scripthash, key)
|
||||
if dao.MPT != nil {
|
||||
k := mpt.ToNeoStorageKey(stKey[1:]) // strip STStorage prefix
|
||||
if err := dao.MPT.Delete(k); err != nil && err != mpt.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return dao.Store.Delete(stKey)
|
||||
}
|
||||
|
||||
// StorageItemWithKey is a Key-Value pair together with possible const modifier.
|
||||
type StorageItemWithKey struct {
|
||||
state.StorageItem
|
||||
Key []byte
|
||||
}
|
||||
|
||||
// GetStorageItems returns all storage items for a given scripthash.
|
||||
func (dao *Simple) GetStorageItems(hash util.Uint160) (map[string]*state.StorageItem, error) {
|
||||
var siMap = make(map[string]*state.StorageItem)
|
||||
func (dao *Simple) GetStorageItems(hash util.Uint160, prefix []byte) ([]StorageItemWithKey, error) {
|
||||
var res []StorageItemWithKey
|
||||
var err error
|
||||
|
||||
saveToMap := func(k, v []byte) {
|
||||
|
@ -445,21 +635,24 @@ func (dao *Simple) GetStorageItems(hash util.Uint160) (map[string]*state.Storage
|
|||
return
|
||||
}
|
||||
r := io.NewBinReaderFromBuf(v)
|
||||
si := &state.StorageItem{}
|
||||
si.DecodeBinary(r)
|
||||
var s StorageItemWithKey
|
||||
s.StorageItem.DecodeBinary(r)
|
||||
if r.Err != nil {
|
||||
err = r.Err
|
||||
return
|
||||
}
|
||||
|
||||
// Cut prefix and hash.
|
||||
siMap[string(k[21:])] = si
|
||||
// Must copy here, #1468.
|
||||
s.Key = make([]byte, len(k[21:]))
|
||||
copy(s.Key, k[21:])
|
||||
res = append(res, s)
|
||||
}
|
||||
dao.Store.Seek(storage.AppendPrefix(storage.STStorage, hash.BytesLE()), saveToMap)
|
||||
dao.Store.Seek(makeStorageItemKey(hash, prefix), saveToMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return siMap, nil
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// makeStorageItemKey returns a key used to store StorageItem in the DB.
|
||||
|
|
58
pkg/core/dao/storage_item.go
Normal file
58
pkg/core/dao/storage_item.go
Normal file
|
@ -0,0 +1,58 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
type (
|
||||
itemState int
|
||||
|
||||
trackedItem struct {
|
||||
state.StorageItem
|
||||
State itemState
|
||||
}
|
||||
|
||||
itemCache struct {
|
||||
st map[util.Uint160]map[string]*trackedItem
|
||||
keys map[util.Uint160][]string
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
getOp itemState = 1 << iota
|
||||
delOp
|
||||
addOp
|
||||
putOp
|
||||
flushedState
|
||||
)
|
||||
|
||||
func newItemCache() *itemCache {
|
||||
return &itemCache{
|
||||
make(map[util.Uint160]map[string]*trackedItem),
|
||||
make(map[util.Uint160][]string),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *itemCache) put(h util.Uint160, key []byte, op itemState, item *state.StorageItem) {
|
||||
m := c.getItems(h)
|
||||
m[string(key)] = &trackedItem{
|
||||
StorageItem: *item,
|
||||
State: op,
|
||||
}
|
||||
c.keys[h] = append(c.keys[h], string(key))
|
||||
c.st[h] = m
|
||||
}
|
||||
|
||||
func (c *itemCache) getItem(h util.Uint160, key []byte) *trackedItem {
|
||||
m := c.getItems(h)
|
||||
return m[string(key)]
|
||||
}
|
||||
|
||||
func (c *itemCache) getItems(h util.Uint160) map[string]*trackedItem {
|
||||
m, ok := c.st[h]
|
||||
if !ok {
|
||||
return make(map[string]*trackedItem)
|
||||
}
|
||||
return m
|
||||
}
|
29
pkg/core/doc.go
Normal file
29
pkg/core/doc.go
Normal file
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
Package core implements Neo ledger functionality.
|
||||
It's built around the Blockchain structure that maintains state of the ledger.
|
||||
|
||||
Events
|
||||
|
||||
You can subscribe to Blockchain events using a set of Subscribe and Unsubscribe
|
||||
methods. These methods accept channels that will be used to send appropriate
|
||||
events, so you can control buffering. Channels are never closed by Blockchain,
|
||||
you can close them after unsubscription.
|
||||
|
||||
Unlike RPC-level subscriptions these don't allow event filtering because it
|
||||
doesn't improve overall efficiency much (when you're using Blockchain you're
|
||||
in the same process with it and filtering on your side is not that different
|
||||
from filtering on Blockchain side).
|
||||
|
||||
The same level of ordering guarantees as with RPC subscriptions is provided,
|
||||
albeit for a set of event channels, so at first transaction execution is
|
||||
announced via appropriate channels, then followed by notifications generated
|
||||
during this execution, then followed by transaction announcement and then
|
||||
followed by block announcement. Transaction announcements are ordered the same
|
||||
way they're stored in the block.
|
||||
|
||||
Be careful using these subscriptions, this mechanism is not intended to be used
|
||||
by lots of subscribers and failing to read from event channels can affect
|
||||
other Blockchain operations.
|
||||
|
||||
*/
|
||||
package core
|
|
@ -59,6 +59,9 @@ func newBlock(cfg config.ProtocolConfiguration, index uint32, prev util.Uint256,
|
|||
witness := transaction.Witness{
|
||||
VerificationScript: valScript,
|
||||
}
|
||||
if len(txs) == 0 {
|
||||
txs = []*transaction.Transaction{newMinerTX()}
|
||||
}
|
||||
b := &block.Block{
|
||||
Base: block.Base{
|
||||
Version: 0,
|
||||
|
@ -71,7 +74,10 @@ func newBlock(cfg config.ProtocolConfiguration, index uint32, prev util.Uint256,
|
|||
},
|
||||
Transactions: txs,
|
||||
}
|
||||
_ = b.RebuildMerkleRoot()
|
||||
err := b.RebuildMerkleRoot()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
invScript := make([]byte, 0)
|
||||
for _, wif := range privNetKeys {
|
||||
|
@ -284,22 +290,30 @@ func TestCreateBasicChain(t *testing.T) {
|
|||
// Push some contract into the chain.
|
||||
avm, err := ioutil.ReadFile(prefix + "test_contract.avm")
|
||||
require.NoError(t, err)
|
||||
t.Logf("contractHash: %s", hash.Hash160(avm).StringLE())
|
||||
|
||||
// Same contract but with different hash.
|
||||
avmOld := append(avm, byte(opcode.RET))
|
||||
t.Logf("contractHash (old): %s", hash.Hash160(avmOld).StringLE())
|
||||
|
||||
var props smartcontract.PropertyState
|
||||
script := io.NewBufBinWriter()
|
||||
emit.Bytes(script.BinWriter, []byte("Da contract dat hallos u"))
|
||||
emit.Bytes(script.BinWriter, []byte("joe@example.com"))
|
||||
emit.Bytes(script.BinWriter, []byte("Random Guy"))
|
||||
emit.Bytes(script.BinWriter, []byte("0.99"))
|
||||
emit.Bytes(script.BinWriter, []byte("Helloer"))
|
||||
contractDesc := []string{
|
||||
"Da contract dat hallos u", // desc
|
||||
"joe@example.com", // email
|
||||
"Random Guy", // author
|
||||
"0.99", // version
|
||||
"Helloer", // name
|
||||
}
|
||||
for i := range contractDesc {
|
||||
emit.String(script.BinWriter, contractDesc[i])
|
||||
}
|
||||
props |= smartcontract.HasStorage
|
||||
emit.Int(script.BinWriter, int64(props))
|
||||
emit.Int(script.BinWriter, int64(5))
|
||||
params := make([]byte, 1)
|
||||
params[0] = byte(7)
|
||||
emit.Bytes(script.BinWriter, params)
|
||||
emit.Bytes(script.BinWriter, avm)
|
||||
emit.Bytes(script.BinWriter, avmOld)
|
||||
emit.Syscall(script.BinWriter, "Neo.Contract.Create")
|
||||
txScript := script.Bytes()
|
||||
|
||||
|
@ -323,7 +337,7 @@ func TestCreateBasicChain(t *testing.T) {
|
|||
|
||||
// Now invoke this contract.
|
||||
script = io.NewBufBinWriter()
|
||||
emit.AppCallWithOperationAndArgs(script.BinWriter, hash.Hash160(avm), "Put", "testkey", "testvalue")
|
||||
emit.AppCallWithOperationAndArgs(script.BinWriter, hash.Hash160(avmOld), "Put", "testkey", "testvalue")
|
||||
|
||||
txInv := transaction.NewInvocationTX(script.Bytes(), 0)
|
||||
b = bc.newBlock(newMinerTX(), txInv)
|
||||
|
@ -353,7 +367,7 @@ func TestCreateBasicChain(t *testing.T) {
|
|||
b = bc.newBlock(newMinerTX(), txNeo0to1)
|
||||
require.NoError(t, bc.AddBlock(b))
|
||||
|
||||
sh := hash.Hash160(avm)
|
||||
sh := hash.Hash160(avmOld)
|
||||
w := io.NewBufBinWriter()
|
||||
emit.AppCallWithOperationAndArgs(w.BinWriter, sh, "init")
|
||||
initTx := transaction.NewInvocationTX(w.Bytes(), 0)
|
||||
|
@ -365,6 +379,39 @@ func TestCreateBasicChain(t *testing.T) {
|
|||
transferTx = newNEP5Transfer(sh, priv0.GetScriptHash(), priv1.GetScriptHash(), 123)
|
||||
b = bc.newBlock(newMinerTX(), transferTx)
|
||||
require.NoError(t, bc.AddBlock(b))
|
||||
t.Logf("txTransfer: %s", transferTx.Hash().StringLE())
|
||||
|
||||
w = io.NewBufBinWriter()
|
||||
args := []interface{}{avm}
|
||||
for i := range contractDesc {
|
||||
args = append(args, contractDesc[i])
|
||||
}
|
||||
emit.AppCallWithOperationAndArgs(w.BinWriter, sh, "migrate", args...)
|
||||
emit.Opcode(w.BinWriter, opcode.THROWIFNOT)
|
||||
invFee = util.Fixed8FromInt64(100)
|
||||
migrateTx := transaction.NewInvocationTX(w.Bytes(), invFee)
|
||||
migrateTx.AddInput(&transaction.Input{
|
||||
PrevHash: txDeploy.Hash(),
|
||||
PrevIndex: 0,
|
||||
})
|
||||
migrateTx.AddOutput(&transaction.Output{
|
||||
AssetID: UtilityTokenID(),
|
||||
Amount: gasOwned - invFee,
|
||||
ScriptHash: priv0.GetScriptHash(),
|
||||
Position: 0,
|
||||
})
|
||||
require.NoError(t, acc0.SignTx(migrateTx))
|
||||
gasOwned -= invFee
|
||||
t.Logf("txMigrate: %s", migrateTx.Hash().StringLE())
|
||||
b = bc.newBlock(newMinerTX(), migrateTx)
|
||||
require.NoError(t, bc.AddBlock(b))
|
||||
|
||||
sh = hash.Hash160(avm)
|
||||
t.Logf("contractHash (new): %s", sh.StringLE())
|
||||
|
||||
transferTx = newNEP5Transfer(sh, priv1.GetScriptHash(), priv0.GetScriptHash(), 2, 1)
|
||||
b = bc.newBlock(newMinerTX(), transferTx)
|
||||
require.NoError(t, bc.AddBlock(b))
|
||||
|
||||
if saveChain {
|
||||
outStream, err := os.Create(prefix + "testblocks.acc")
|
||||
|
@ -399,10 +446,12 @@ func TestCreateBasicChain(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func newNEP5Transfer(sc, from, to util.Uint160, amount int64) *transaction.Transaction {
|
||||
func newNEP5Transfer(sc, from, to util.Uint160, amounts ...int64) *transaction.Transaction {
|
||||
w := io.NewBufBinWriter()
|
||||
emit.AppCallWithOperationAndArgs(w.BinWriter, sc, "transfer", from, to, amount)
|
||||
emit.Opcode(w.BinWriter, opcode.THROWIFNOT)
|
||||
for i := range amounts {
|
||||
emit.AppCallWithOperationAndArgs(w.BinWriter, sc, "transfer", from, to, amounts[i])
|
||||
emit.Opcode(w.BinWriter, opcode.THROWIFNOT)
|
||||
}
|
||||
|
||||
script := w.Bytes()
|
||||
return transaction.NewInvocationTX(script, 0)
|
||||
|
|
|
@ -1,18 +1,23 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"math/big"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
gherr "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -177,10 +182,20 @@ func (ic *interopContext) txGetUnspentCoins(v *vm.VM) error {
|
|||
return errors.New("value is not a transaction")
|
||||
}
|
||||
ucs, err := ic.dao.GetUnspentCoinState(tx.Hash())
|
||||
if err != nil {
|
||||
if err == storage.ErrKeyNotFound {
|
||||
v.Estack().PushVal([]vm.StackItem{})
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return errors.New("no unspent coin state found")
|
||||
}
|
||||
v.Estack().PushVal(vm.NewInteropItem(ucs))
|
||||
|
||||
items := make([]vm.StackItem, 0, len(ucs.States))
|
||||
for i := range ucs.States {
|
||||
if ucs.States[i].State&state.CoinSpent == 0 {
|
||||
items = append(items, vm.NewInteropItem(&ucs.States[i].Output))
|
||||
}
|
||||
}
|
||||
v.Estack().PushVal(items)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -236,7 +251,14 @@ func (ic *interopContext) witnessGetVerificationScript(v *vm.VM) error {
|
|||
|
||||
// bcGetValidators returns validators.
|
||||
func (ic *interopContext) bcGetValidators(v *vm.VM) error {
|
||||
validators := ic.dao.GetValidators()
|
||||
valStates := ic.dao.GetValidators()
|
||||
if len(valStates) > vm.MaxArraySize {
|
||||
return errors.New("too many validators")
|
||||
}
|
||||
validators := make([]vm.StackItem, 0, len(valStates))
|
||||
for _, val := range valStates {
|
||||
validators = append(validators, vm.NewByteArrayItem(val.PublicKey.Bytes()))
|
||||
}
|
||||
v.Estack().PushVal(validators)
|
||||
return nil
|
||||
}
|
||||
|
@ -444,22 +466,13 @@ func (ic *interopContext) storageFind(v *vm.VM) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prefix := string(v.Estack().Pop().Bytes())
|
||||
siMap, err := ic.dao.GetStorageItems(stc.ScriptHash)
|
||||
pref := v.Estack().Pop().Bytes()
|
||||
next, err := ic.dao.GetStorageItemsIterator(stc.ScriptHash, pref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filteredMap := vm.NewMapItem()
|
||||
for k, v := range siMap {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
filteredMap.Add(vm.NewByteArrayItem([]byte(k)),
|
||||
vm.NewByteArrayItem(v.Value))
|
||||
}
|
||||
}
|
||||
|
||||
item := vm.NewMapIterator(filteredMap)
|
||||
v.Estack().PushVal(item)
|
||||
item := newStorageIterator(next)
|
||||
v.Estack().PushVal(vm.NewInteropItem(item))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -572,25 +585,75 @@ func (ic *interopContext) contractMigrate(v *vm.VM) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hash := getContextScriptHash(v, 0)
|
||||
if contract.HasStorage() {
|
||||
hash := getContextScriptHash(v, 0)
|
||||
siMap, err := ic.dao.GetStorageItems(hash)
|
||||
siMap, err := ic.dao.GetStorageItems(hash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range siMap {
|
||||
v.IsConst = false
|
||||
err = ic.dao.PutStorageItem(contract.ScriptHash(), []byte(k), v)
|
||||
for i := range siMap {
|
||||
v := siMap[i].StorageItem
|
||||
siMap[i].IsConst = false
|
||||
err = ic.dao.PutStorageItem(contract.ScriptHash(), siMap[i].Key, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ic.dao.MigrateNEP5Balances(hash, contract.ScriptHash())
|
||||
|
||||
// save NEP5 metadata if any
|
||||
v := ic.bc.GetTestVM(nil)
|
||||
w := io.NewBufBinWriter()
|
||||
emit.AppCallWithOperationAndArgs(w.BinWriter, hash, "decimals")
|
||||
conf := ic.bc.GetConfig()
|
||||
v.SetGasLimit(conf.GetFreeGas(ic.bc.BlockHeight() + 1)) // BlockHeight() is already persisted, so it's either a new block or test invocation.
|
||||
v.Load(w.Bytes())
|
||||
if err := v.Run(); err == nil && v.Estack().Len() == 1 {
|
||||
res := v.Estack().Pop().Item().ToContractParameter(map[vm.StackItem]bool{})
|
||||
d := int64(-1)
|
||||
switch res.Type {
|
||||
case smartcontract.IntegerType:
|
||||
d = res.Value.(int64)
|
||||
case smartcontract.ByteArrayType:
|
||||
d = emit.BytesToInt(res.Value.([]byte)).Int64()
|
||||
}
|
||||
if d >= 0 {
|
||||
ic.dao.PutNEP5Metadata(hash, &state.NEP5Metadata{Decimals: d})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
v.Estack().PushVal(vm.NewInteropItem(contract))
|
||||
return ic.contractDestroy(v)
|
||||
}
|
||||
|
||||
// secp256k1Recover recovers speck256k1 public key.
|
||||
func (ic *interopContext) secp256k1Recover(v *vm.VM) error {
|
||||
return ic.eccRecover(btcec.S256(), v)
|
||||
}
|
||||
|
||||
// secp256r1Recover recovers speck256r1 public key.
|
||||
func (ic *interopContext) secp256r1Recover(v *vm.VM) error {
|
||||
return ic.eccRecover(elliptic.P256(), v)
|
||||
}
|
||||
|
||||
// eccRecover recovers public key using ECCurve set
|
||||
func (ic *interopContext) eccRecover(curve elliptic.Curve, v *vm.VM) error {
|
||||
rBytes := v.Estack().Pop().Bytes()
|
||||
sBytes := v.Estack().Pop().Bytes()
|
||||
r := new(big.Int).SetBytes(rBytes)
|
||||
s := new(big.Int).SetBytes(sBytes)
|
||||
isEven := v.Estack().Pop().Bool()
|
||||
messageHash := v.Estack().Pop().Bytes()
|
||||
pKey, err := keys.KeyRecover(curve, r, s, messageHash, isEven)
|
||||
if err != nil {
|
||||
v.Estack().PushVal([]byte{})
|
||||
return nil
|
||||
}
|
||||
v.Estack().PushVal(pKey.UncompressedBytes()[1:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// assetCreate creates an asset.
|
||||
func (ic *interopContext) assetCreate(v *vm.VM) error {
|
||||
if ic.trigger != trigger.Application {
|
||||
|
|
|
@ -1,14 +1,17 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/dao"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/random"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
|
@ -457,8 +460,81 @@ func TestAssetGetPrecision(t *testing.T) {
|
|||
require.Equal(t, big.NewInt(int64(assetState.Precision)), precision)
|
||||
}
|
||||
|
||||
func TestSecp256k1Recover(t *testing.T) {
|
||||
v, context, chain := createVM(t)
|
||||
defer chain.Close()
|
||||
|
||||
privateKey, err := btcec.NewPrivateKey(btcec.S256())
|
||||
require.NoError(t, err)
|
||||
message := []byte("The quick brown fox jumps over the lazy dog")
|
||||
signature, err := privateKey.Sign(message)
|
||||
require.NoError(t, err)
|
||||
require.True(t, signature.Verify(message, privateKey.PubKey()))
|
||||
pubKey := keys.PublicKey{
|
||||
X: privateKey.PubKey().X,
|
||||
Y: privateKey.PubKey().Y,
|
||||
}
|
||||
expected := pubKey.UncompressedBytes()[1:]
|
||||
|
||||
// We don't know which of two recovered keys suites, so let's try both.
|
||||
putOnStackGetResult := func(isEven bool) []byte {
|
||||
v.Estack().PushVal(message)
|
||||
v.Estack().PushVal(isEven)
|
||||
v.Estack().PushVal(signature.S.Bytes())
|
||||
v.Estack().PushVal(signature.R.Bytes())
|
||||
err = context.secp256k1Recover(v)
|
||||
require.NoError(t, err)
|
||||
return v.Estack().Pop().Value().([]byte)
|
||||
}
|
||||
|
||||
// First one:
|
||||
actualFalse := putOnStackGetResult(false)
|
||||
// Second one:
|
||||
actualTrue := putOnStackGetResult(true)
|
||||
|
||||
require.True(t, bytes.Compare(expected, actualFalse) != bytes.Compare(expected, actualTrue))
|
||||
}
|
||||
|
||||
func TestSecp256r1Recover(t *testing.T) {
|
||||
v, context, chain := createVM(t)
|
||||
defer chain.Close()
|
||||
|
||||
privateKey, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
message := []byte("The quick brown fox jumps over the lazy dog")
|
||||
messageHash := hash.Sha256(message).BytesBE()
|
||||
signature := privateKey.Sign(message)
|
||||
require.True(t, privateKey.PublicKey().Verify(signature, messageHash))
|
||||
expected := privateKey.PublicKey().UncompressedBytes()[1:]
|
||||
|
||||
// We don't know which of two recovered keys suites, so let's try both.
|
||||
putOnStackGetResult := func(isEven bool) []byte {
|
||||
v.Estack().PushVal(messageHash)
|
||||
v.Estack().PushVal(isEven)
|
||||
v.Estack().PushVal(signature[32:64])
|
||||
v.Estack().PushVal(signature[0:32])
|
||||
err = context.secp256r1Recover(v)
|
||||
require.NoError(t, err)
|
||||
return v.Estack().Pop().Value().([]byte)
|
||||
}
|
||||
|
||||
// First one:
|
||||
actualFalse := putOnStackGetResult(false)
|
||||
// Second one:
|
||||
actualTrue := putOnStackGetResult(true)
|
||||
|
||||
require.True(t, bytes.Compare(expected, actualFalse) != bytes.Compare(expected, actualTrue))
|
||||
}
|
||||
|
||||
// Helper functions to create VM, InteropContext, TX, Account, Contract, Asset.
|
||||
|
||||
func createVM(t *testing.T) (*vm.VM, *interopContext, *Blockchain) {
|
||||
v := vm.New()
|
||||
chain := newTestChain(t)
|
||||
context := chain.newInteropContext(trigger.Application, dao.NewSimple(storage.NewMemoryStore()), nil, nil)
|
||||
return v, context, chain
|
||||
}
|
||||
|
||||
func createVMAndPushBlock(t *testing.T) (*vm.VM, *block.Block, *interopContext, *Blockchain) {
|
||||
v := vm.New()
|
||||
block := newDumbBlock()
|
||||
|
|
|
@ -322,6 +322,10 @@ func (ic *interopContext) runtimeCheckWitness(v *vm.VM) error {
|
|||
hashOrKey := v.Estack().Pop().Bytes()
|
||||
hash, err := util.Uint160DecodeBytesBE(hashOrKey)
|
||||
if err != nil {
|
||||
// We only accept compressed keys here as per C# implementation.
|
||||
if len(hashOrKey) != 33 {
|
||||
return errors.New("bad parameter length")
|
||||
}
|
||||
key := &keys.PublicKey{}
|
||||
err = key.DecodeBytes(hashOrKey)
|
||||
if err != nil {
|
||||
|
@ -557,12 +561,12 @@ func (ic *interopContext) contractDestroy(v *vm.VM) error {
|
|||
return err
|
||||
}
|
||||
if cs.HasStorage() {
|
||||
siMap, err := ic.dao.GetStorageItems(hash)
|
||||
siMap, err := ic.dao.GetStorageItems(hash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k := range siMap {
|
||||
_ = ic.dao.DeleteStorageItem(hash, []byte(k))
|
||||
for i := range siMap {
|
||||
_ = ic.dao.DeleteStorageItem(hash, siMap[i].Key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -575,8 +579,12 @@ func (ic *interopContext) contractGetStorageContext(v *vm.VM) error {
|
|||
if !ok {
|
||||
return fmt.Errorf("%T is not a contract state", cs)
|
||||
}
|
||||
contractState, err := ic.dao.GetContractState(cs.ScriptHash())
|
||||
if contractState == nil || err != nil {
|
||||
_, err := ic.dao.GetContractState(cs.ScriptHash())
|
||||
if err != nil {
|
||||
return fmt.Errorf("non-existent contract")
|
||||
}
|
||||
_, err = ic.lowerDao.GetContractState(cs.ScriptHash())
|
||||
if err == nil {
|
||||
return fmt.Errorf("contract was not created in this transaction")
|
||||
}
|
||||
stc := &StorageContext{
|
||||
|
|
|
@ -27,6 +27,7 @@ type interopContext struct {
|
|||
block *block.Block
|
||||
tx *transaction.Transaction
|
||||
dao *dao.Cached
|
||||
lowerDao dao.DAO
|
||||
notifications []state.NotificationEvent
|
||||
log *zap.Logger
|
||||
}
|
||||
|
@ -34,7 +35,7 @@ type interopContext struct {
|
|||
func newInteropContext(trigger trigger.Type, bc Blockchainer, d dao.DAO, block *block.Block, tx *transaction.Transaction, log *zap.Logger) *interopContext {
|
||||
dao := dao.NewCached(d)
|
||||
nes := make([]state.NotificationEvent, 0)
|
||||
return &interopContext{bc, trigger, block, tx, dao, nes, log}
|
||||
return &interopContext{bc, trigger, block, tx, dao, d, nes, log}
|
||||
}
|
||||
|
||||
// SpawnVM returns a VM with script getter and interop functions set
|
||||
|
@ -51,6 +52,9 @@ func (ic *interopContext) SpawnVM() *vm.VM {
|
|||
})
|
||||
vm.RegisterInteropGetter(ic.getSystemInterop)
|
||||
vm.RegisterInteropGetter(ic.getNeoInterop)
|
||||
if ic.bc != nil && ic.bc.GetConfig().EnableStateRoot {
|
||||
vm.RegisterInteropGetter(ic.getNeoxInterop)
|
||||
}
|
||||
return vm
|
||||
}
|
||||
|
||||
|
@ -76,6 +80,12 @@ func (ic *interopContext) getNeoInterop(id uint32) *vm.InteropFuncPrice {
|
|||
return ic.getInteropFromSlice(id, neoInterops)
|
||||
}
|
||||
|
||||
// getNeoxInterop returns matching interop function from the NeoX extension
|
||||
// for a given id in the current context.
|
||||
func (ic *interopContext) getNeoxInterop(id uint32) *vm.InteropFuncPrice {
|
||||
return ic.getInteropFromSlice(id, neoxInterops)
|
||||
}
|
||||
|
||||
// getInteropFromSlice returns matching interop function from the given slice of
|
||||
// interop functions in the current context.
|
||||
func (ic *interopContext) getInteropFromSlice(id uint32, slice []interopedFunction) *vm.InteropFuncPrice {
|
||||
|
@ -275,6 +285,11 @@ var neoInterops = []interopedFunction{
|
|||
{Name: "AntShares.Transaction.GetType", Func: (*interopContext).txGetType, Price: 1},
|
||||
}
|
||||
|
||||
var neoxInterops = []interopedFunction{
|
||||
{Name: "Neo.Cryptography.Secp256k1Recover", Func: (*interopContext).secp256k1Recover, Price: 100},
|
||||
{Name: "Neo.Cryptography.Secp256r1Recover", Func: (*interopContext).secp256r1Recover, Price: 100},
|
||||
}
|
||||
|
||||
// initIDinInteropsSlice initializes IDs from names in one given
|
||||
// interopedFunction slice and then sorts it.
|
||||
func initIDinInteropsSlice(iops []interopedFunction) {
|
||||
|
@ -290,4 +305,5 @@ func initIDinInteropsSlice(iops []interopedFunction) {
|
|||
func init() {
|
||||
initIDinInteropsSlice(systemInterops)
|
||||
initIDinInteropsSlice(neoInterops)
|
||||
initIDinInteropsSlice(neoxInterops)
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
// Feer is an interface that abstract the implementation of the fee calculation.
|
||||
type Feer interface {
|
||||
BlockHeight() uint32
|
||||
NetworkFee(t *transaction.Transaction) util.Fixed8
|
||||
IsLowPriority(util.Fixed8) bool
|
||||
FeePerByte(t *transaction.Transaction) util.Fixed8
|
||||
|
|
|
@ -2,9 +2,9 @@ package mempool
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"math/bits"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -26,7 +26,7 @@ var (
|
|||
// item represents a transaction in the the Memory pool.
|
||||
type item struct {
|
||||
txn *transaction.Transaction
|
||||
timeStamp time.Time
|
||||
blockStamp uint32
|
||||
perByteFee util.Fixed8
|
||||
netFee util.Fixed8
|
||||
isLowPrio bool
|
||||
|
@ -49,6 +49,9 @@ type Pool struct {
|
|||
inputs []*transaction.Input
|
||||
claims []*transaction.Input
|
||||
|
||||
resendThreshold uint32
|
||||
resendFunc func(*transaction.Transaction)
|
||||
|
||||
capacity int
|
||||
}
|
||||
|
||||
|
@ -162,7 +165,7 @@ func dropInputFromSortedSlice(slice *[]*transaction.Input, input *transaction.In
|
|||
func (mp *Pool) Add(t *transaction.Transaction, fee Feer) error {
|
||||
var pItem = &item{
|
||||
txn: t,
|
||||
timeStamp: time.Now().UTC(),
|
||||
blockStamp: fee.BlockHeight(),
|
||||
perByteFee: fee.FeePerByte(t),
|
||||
netFee: fee.NetworkFee(t),
|
||||
}
|
||||
|
@ -177,7 +180,6 @@ func (mp *Pool) Add(t *transaction.Transaction, fee Feer) error {
|
|||
return ErrDup
|
||||
}
|
||||
|
||||
mp.verifiedMap[t.Hash()] = pItem
|
||||
// Insert into sorted array (from max to min, that could also be done
|
||||
// using sort.Sort(sort.Reverse()), but it incurs more overhead. Notice
|
||||
// also that we're searching for position that is strictly more
|
||||
|
@ -206,6 +208,7 @@ func (mp *Pool) Add(t *transaction.Transaction, fee Feer) error {
|
|||
copy(mp.verifiedTxes[n+1:], mp.verifiedTxes[n:])
|
||||
mp.verifiedTxes[n] = pItem
|
||||
}
|
||||
mp.verifiedMap[t.Hash()] = pItem
|
||||
|
||||
// For lots of inputs it might be easier to push them all and sort
|
||||
// afterwards, but that requires benchmarking.
|
||||
|
@ -258,13 +261,14 @@ func (mp *Pool) Remove(hash util.Uint256) {
|
|||
// RemoveStale filters verified transactions through the given function keeping
|
||||
// only the transactions for which it returns a true result. It's used to quickly
|
||||
// drop part of the mempool that is now invalid after the block acceptance.
|
||||
func (mp *Pool) RemoveStale(isOK func(*transaction.Transaction) bool) {
|
||||
func (mp *Pool) RemoveStale(isOK func(*transaction.Transaction) bool, height uint32) {
|
||||
mp.lock.Lock()
|
||||
// We can reuse already allocated slice
|
||||
// because items are iterated one-by-one in increasing order.
|
||||
newVerifiedTxes := mp.verifiedTxes[:0]
|
||||
newInputs := mp.inputs[:0]
|
||||
newClaims := mp.claims[:0]
|
||||
var staleTxs []*transaction.Transaction
|
||||
for _, itm := range mp.verifiedTxes {
|
||||
if isOK(itm.txn) {
|
||||
newVerifiedTxes = append(newVerifiedTxes, itm)
|
||||
|
@ -277,10 +281,21 @@ func (mp *Pool) RemoveStale(isOK func(*transaction.Transaction) bool) {
|
|||
newClaims = append(newClaims, &claim.Claims[i])
|
||||
}
|
||||
}
|
||||
if mp.resendThreshold != 0 {
|
||||
// tx is resend at resendThreshold, 2*resendThreshold, 4*resendThreshold ...
|
||||
// so quotient must be a power of two.
|
||||
diff := (height - itm.blockStamp)
|
||||
if diff%mp.resendThreshold == 0 && bits.OnesCount32(diff/mp.resendThreshold) == 1 {
|
||||
staleTxs = append(staleTxs, itm.txn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
delete(mp.verifiedMap, itm.txn.Hash())
|
||||
}
|
||||
}
|
||||
if len(staleTxs) != 0 {
|
||||
go mp.resendStaleTxs(staleTxs)
|
||||
}
|
||||
sort.Slice(newInputs, func(i, j int) bool {
|
||||
return newInputs[i].Cmp(newInputs[j]) < 0
|
||||
})
|
||||
|
@ -302,6 +317,21 @@ func NewMemPool(capacity int) Pool {
|
|||
}
|
||||
}
|
||||
|
||||
// SetResendThreshold sets threshold after which transaction will be considered stale
|
||||
// and returned for retransmission by `GetStaleTransactions`.
|
||||
func (mp *Pool) SetResendThreshold(h uint32, f func(*transaction.Transaction)) {
|
||||
mp.lock.Lock()
|
||||
defer mp.lock.Unlock()
|
||||
mp.resendThreshold = h
|
||||
mp.resendFunc = f
|
||||
}
|
||||
|
||||
func (mp *Pool) resendStaleTxs(txs []*transaction.Transaction) {
|
||||
for i := range txs {
|
||||
mp.resendFunc(txs[i])
|
||||
}
|
||||
}
|
||||
|
||||
// TryGetValue returns a transaction and its fee if it exists in the memory pool.
|
||||
func (mp *Pool) TryGetValue(hash util.Uint256) (*transaction.Transaction, util.Fixed8, bool) {
|
||||
mp.lock.RLock()
|
||||
|
|
|
@ -3,6 +3,7 @@ package mempool
|
|||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/random"
|
||||
|
@ -12,12 +13,17 @@ import (
|
|||
)
|
||||
|
||||
type FeerStub struct {
|
||||
blockHeight uint32
|
||||
lowPriority bool
|
||||
sysFee util.Fixed8
|
||||
netFee util.Fixed8
|
||||
perByteFee util.Fixed8
|
||||
}
|
||||
|
||||
func (fs *FeerStub) BlockHeight() uint32 {
|
||||
return fs.blockHeight
|
||||
}
|
||||
|
||||
func (fs *FeerStub) NetworkFee(*transaction.Transaction) util.Fixed8 {
|
||||
return fs.netFee
|
||||
}
|
||||
|
@ -53,6 +59,48 @@ func testMemPoolAddRemoveWithFeer(t *testing.T, fs Feer) {
|
|||
assert.Equal(t, 0, len(mp.verifiedTxes))
|
||||
}
|
||||
|
||||
func TestMemPoolRemoveStale(t *testing.T) {
|
||||
mp := NewMemPool(5)
|
||||
txs := make([]*transaction.Transaction, 5)
|
||||
for i := range txs {
|
||||
txs[i] = newMinerTX(uint32(i))
|
||||
require.NoError(t, mp.Add(txs[i], &FeerStub{blockHeight: uint32(i)}))
|
||||
}
|
||||
|
||||
staleTxs := make(chan *transaction.Transaction, 5)
|
||||
f := func(tx *transaction.Transaction) {
|
||||
staleTxs <- tx
|
||||
}
|
||||
mp.SetResendThreshold(5, f)
|
||||
|
||||
isValid := func(tx *transaction.Transaction) bool {
|
||||
return tx.Data.(*transaction.MinerTX).Nonce%2 == 0
|
||||
}
|
||||
|
||||
mp.RemoveStale(isValid, 5) // 0 + 5
|
||||
require.Eventually(t, func() bool { return len(staleTxs) == 1 }, time.Second, time.Millisecond*100)
|
||||
require.Equal(t, txs[0], <-staleTxs)
|
||||
|
||||
mp.RemoveStale(isValid, 7) // 2 + 5
|
||||
require.Eventually(t, func() bool { return len(staleTxs) == 1 }, time.Second, time.Millisecond*100)
|
||||
require.Equal(t, txs[2], <-staleTxs)
|
||||
|
||||
mp.RemoveStale(isValid, 10) // 0 + 2 * 5
|
||||
require.Eventually(t, func() bool { return len(staleTxs) == 1 }, time.Second, time.Millisecond*100)
|
||||
require.Equal(t, txs[0], <-staleTxs)
|
||||
|
||||
mp.RemoveStale(isValid, 15) // 0 + 3 * 5
|
||||
|
||||
// tx[2] should appear, so it is also checked that tx[0] wasn't sent on height 15.
|
||||
mp.RemoveStale(isValid, 22) // 2 + 4 * 5
|
||||
require.Eventually(t, func() bool { return len(staleTxs) == 1 }, time.Second, time.Millisecond*100)
|
||||
require.Equal(t, txs[2], <-staleTxs)
|
||||
|
||||
// panic if something is sent after this.
|
||||
close(staleTxs)
|
||||
require.Len(t, staleTxs, 0)
|
||||
}
|
||||
|
||||
func TestMemPoolAddRemove(t *testing.T) {
|
||||
var fs = &FeerStub{lowPriority: false}
|
||||
t.Run("low priority", func(t *testing.T) { testMemPoolAddRemoveWithFeer(t, fs) })
|
||||
|
@ -119,7 +167,7 @@ func TestMemPoolAddRemoveWithInputsAndClaims(t *testing.T) {
|
|||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}, 0)
|
||||
assert.Equal(t, len(txm1.Inputs), len(mp.inputs))
|
||||
assert.True(t, sort.SliceIsSorted(mp.inputs, mpLessInputs))
|
||||
assert.Equal(t, len(claim2.Claims), len(mp.claims))
|
||||
|
@ -255,6 +303,9 @@ func TestOverCapacity(t *testing.T) {
|
|||
txcnt++
|
||||
require.Error(t, mp.Add(tx, fs))
|
||||
require.Equal(t, mempoolSize, mp.Count())
|
||||
require.Equal(t, mempoolSize, len(mp.verifiedMap))
|
||||
require.Equal(t, mempoolSize, len(mp.verifiedTxes))
|
||||
require.False(t, mp.containsKey(tx.Hash()))
|
||||
require.Equal(t, true, sort.IsSorted(sort.Reverse(mp.verifiedTxes)))
|
||||
|
||||
// But claim tx should still be there.
|
||||
|
@ -333,7 +384,7 @@ func TestRemoveStale(t *testing.T) {
|
|||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
}, 0)
|
||||
require.Equal(t, mempoolSize/2, mp.Count())
|
||||
verTxes := mp.GetVerifiedTransactions()
|
||||
for _, txf := range verTxes {
|
||||
|
|
105
pkg/core/mpt/base.go
Normal file
105
pkg/core/mpt/base.go
Normal file
|
@ -0,0 +1,105 @@
|
|||
package mpt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// BaseNode implements basic things every node needs like caching hash and
|
||||
// serialized representation. It's a basic node building block intended to be
|
||||
// included into all node types.
|
||||
type BaseNode struct {
|
||||
hash util.Uint256
|
||||
bytes []byte
|
||||
hashValid bool
|
||||
bytesValid bool
|
||||
}
|
||||
|
||||
// BaseNodeIface abstracts away basic Node functions.
|
||||
type BaseNodeIface interface {
|
||||
Hash() util.Uint256
|
||||
Type() NodeType
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
type flushedNode interface {
|
||||
setCache([]byte, util.Uint256)
|
||||
}
|
||||
|
||||
func (b *BaseNode) setCache(bs []byte, h util.Uint256) {
|
||||
b.bytes = bs
|
||||
b.hash = h
|
||||
b.bytesValid = true
|
||||
b.hashValid = true
|
||||
}
|
||||
|
||||
// getHash returns a hash of this BaseNode.
|
||||
func (b *BaseNode) getHash(n Node) util.Uint256 {
|
||||
if !b.hashValid {
|
||||
b.updateHash(n)
|
||||
}
|
||||
return b.hash
|
||||
}
|
||||
|
||||
// getBytes returns a slice of bytes representing this node.
|
||||
func (b *BaseNode) getBytes(n Node) []byte {
|
||||
if !b.bytesValid {
|
||||
b.updateBytes(n)
|
||||
}
|
||||
return b.bytes
|
||||
}
|
||||
|
||||
// updateHash updates hash field for this BaseNode.
|
||||
func (b *BaseNode) updateHash(n Node) {
|
||||
if n.Type() == HashT {
|
||||
panic("can't update hash for hash node")
|
||||
}
|
||||
b.hash = hash.DoubleSha256(b.getBytes(n))
|
||||
b.hashValid = true
|
||||
}
|
||||
|
||||
// updateCache updates hash and bytes fields for this BaseNode.
|
||||
func (b *BaseNode) updateBytes(n Node) {
|
||||
buf := io.NewBufBinWriter()
|
||||
encodeNodeWithType(n, buf.BinWriter)
|
||||
b.bytes = buf.Bytes()
|
||||
b.bytesValid = true
|
||||
}
|
||||
|
||||
// invalidateCache sets all cache fields to invalid state.
|
||||
func (b *BaseNode) invalidateCache() {
|
||||
b.bytesValid = false
|
||||
b.hashValid = false
|
||||
}
|
||||
|
||||
// encodeNodeWithType encodes node together with it's type.
|
||||
func encodeNodeWithType(n Node, w *io.BinWriter) {
|
||||
w.WriteB(byte(n.Type()))
|
||||
n.EncodeBinary(w)
|
||||
}
|
||||
|
||||
// DecodeNodeWithType decodes node together with it's type.
|
||||
func DecodeNodeWithType(r *io.BinReader) Node {
|
||||
if r.Err != nil {
|
||||
return nil
|
||||
}
|
||||
var n Node
|
||||
switch typ := NodeType(r.ReadB()); typ {
|
||||
case BranchT:
|
||||
n = new(BranchNode)
|
||||
case ExtensionT:
|
||||
n = new(ExtensionNode)
|
||||
case HashT:
|
||||
n = new(HashNode)
|
||||
case LeafT:
|
||||
n = new(LeafNode)
|
||||
default:
|
||||
r.Err = fmt.Errorf("invalid node type: %x", typ)
|
||||
return nil
|
||||
}
|
||||
n.DecodeBinary(r)
|
||||
return n
|
||||
}
|
91
pkg/core/mpt/branch.go
Normal file
91
pkg/core/mpt/branch.go
Normal file
|
@ -0,0 +1,91 @@
|
|||
package mpt

import (
	"encoding/json"
	"errors"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

const (
	// childrenCount represents a number of children of a branch node:
	// one slot per nibble value plus one terminal slot.
	childrenCount = 17
	// lastChild is the index of the last child; it holds the value
	// whose key terminates at this branch (see splitPath).
	lastChild = childrenCount - 1
)

// BranchNode represents MPT's branch node.
type BranchNode struct {
	BaseNode
	Children [childrenCount]Node
}

var _ Node = (*BranchNode)(nil)

// NewBranchNode returns new branch node with all children set to empty hash nodes.
func NewBranchNode() *BranchNode {
	b := new(BranchNode)
	for i := 0; i < childrenCount; i++ {
		b.Children[i] = new(HashNode)
	}
	return b
}

// Type implements Node interface.
func (b *BranchNode) Type() NodeType { return BranchT }

// Hash implements BaseNode interface.
func (b *BranchNode) Hash() util.Uint256 {
	return b.getHash(b)
}

// Bytes implements BaseNode interface.
func (b *BranchNode) Bytes() []byte {
	return b.getBytes(b)
}

// EncodeBinary implements io.Serializable.
// Every child is serialized in its compressed (hash node) form.
func (b *BranchNode) EncodeBinary(w *io.BinWriter) {
	for i := 0; i < childrenCount; i++ {
		if hn, ok := b.Children[i].(*HashNode); ok {
			hn.EncodeBinary(w)
			continue
		}
		// A non-hash child is replaced by a hash node carrying its hash.
		n := NewHashNode(b.Children[i].Hash())
		n.EncodeBinary(w)
	}
}

// DecodeBinary implements io.Serializable.
// Children are always decoded as hash nodes, mirroring EncodeBinary.
func (b *BranchNode) DecodeBinary(r *io.BinReader) {
	for i := 0; i < childrenCount; i++ {
		b.Children[i] = new(HashNode)
		b.Children[i].DecodeBinary(r)
	}
}

// MarshalJSON implements json.Marshaler.
// A branch is represented as a plain JSON array of its children.
func (b *BranchNode) MarshalJSON() ([]byte, error) {
	return json.Marshal(b.Children)
}

// UnmarshalJSON implements json.Unmarshaler.
func (b *BranchNode) UnmarshalJSON(data []byte) error {
	var obj NodeObject
	if err := obj.UnmarshalJSON(data); err != nil {
		return err
	} else if u, ok := obj.Node.(*BranchNode); ok {
		*b = *u
		return nil
	}
	return errors.New("expected branch node")
}

// splitPath splits path for a branch node: the first nibble selects the
// child to descend into, the rest is the remaining path. An empty path
// maps to lastChild (the terminal slot).
func splitPath(path []byte) (byte, []byte) {
	if len(path) != 0 {
		return path[0], path[1:]
	}
	return lastChild, path
}
|
45
pkg/core/mpt/doc.go
Normal file
45
pkg/core/mpt/doc.go
Normal file
|
@ -0,0 +1,45 @@
|
|||
/*
Package mpt implements MPT (Merkle-Patricia Tree).

MPT stores key-value pairs and is a trie over 16-symbol alphabet. https://en.wikipedia.org/wiki/Trie
Trie is a tree where values are stored in leafs and keys are paths from root to the leaf node.
MPT consists of 4 types of nodes:
- Leaf node contains only value.
- Extension node contains both key and value.
- Branch node contains 2 or more children.
- Hash node is a compressed node and contains only actual node's hash.
  The actual node must be retrieved from storage or over the network.

As an example here is a trie containing 4 pairs:
- 0x1201 -> val1
- 0x1203 -> val2
- 0x1224 -> val3
- 0x12   -> val4

	ExtensionNode(0x0102), Next
	 _______________________|
	|
	BranchNode [0, 1, 2, ...], Last -> Leaf(val4)
	 |         |
	 |         ExtensionNode [0x04], Next -> Leaf(val3)
	 |
	 BranchNode [0, 1, 2, 3, ...], Last -> HashNode(nil)
	            |  |
	            |  Leaf(val2)
	            |
	            Leaf(val1)

There are 3 invariants that this implementation has:
- Branch node cannot have <= 1 children
- Extension node cannot have zero-length key
- Extension node cannot have another Extension node in its next field

Thanks to these restrictions, there is a single root hash for every set of key-value pairs
regardless of the order they were added/removed with.
The actual trie structure can vary because of node -> HashNode compressing.

There is also one optimization which costs us almost nothing in terms of complexity but is very beneficial:
When we perform get/put/delete on a specific path, every hash node which was retrieved from storage is
replaced by its uncompressed form, so that subsequent hits of this node don't use storage.
*/
package mpt
|
87
pkg/core/mpt/extension.go
Normal file
87
pkg/core/mpt/extension.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
package mpt

import (
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// MaxKeyLength is the max length of the extension node key.
const MaxKeyLength = 1125

// ExtensionNode represents MPT's extension node.
type ExtensionNode struct {
	BaseNode
	key  []byte
	next Node
}

var _ Node = (*ExtensionNode)(nil)

// NewExtensionNode returns new extension node with the specified key and next node.
// Note: because it is a part of Trie, key must be mangled, i.e. must contain only bytes with high half = 0.
func NewExtensionNode(key []byte, next Node) *ExtensionNode {
	return &ExtensionNode{
		key:  key,
		next: next,
	}
}

// Type implements Node interface.
func (e ExtensionNode) Type() NodeType { return ExtensionT }

// Hash implements BaseNode interface.
func (e *ExtensionNode) Hash() util.Uint256 {
	return e.getHash(e)
}

// Bytes implements BaseNode interface.
func (e *ExtensionNode) Bytes() []byte {
	return e.getBytes(e)
}

// DecodeBinary implements io.Serializable.
func (e *ExtensionNode) DecodeBinary(r *io.BinReader) {
	sz := r.ReadVarUint()
	if sz > MaxKeyLength {
		r.Err = fmt.Errorf("extension node key is too big: %d", sz)
		return
	}
	e.key = make([]byte, sz)
	r.ReadBytes(e.key)
	// The next node is always serialized in its compressed (hash node) form.
	e.next = new(HashNode)
	e.next.DecodeBinary(r)
	e.invalidateCache()
}

// EncodeBinary implements io.Serializable.
// The next node is written as a hash node regardless of its actual kind.
func (e ExtensionNode) EncodeBinary(w *io.BinWriter) {
	w.WriteVarBytes(e.key)
	n := NewHashNode(e.next.Hash())
	n.EncodeBinary(w)
}

// MarshalJSON implements json.Marshaler.
func (e *ExtensionNode) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{
		"key":  hex.EncodeToString(e.key),
		"next": e.next,
	}
	return json.Marshal(m)
}

// UnmarshalJSON implements json.Unmarshaler.
func (e *ExtensionNode) UnmarshalJSON(data []byte) error {
	var obj NodeObject
	if err := obj.UnmarshalJSON(data); err != nil {
		return err
	} else if u, ok := obj.Node.(*ExtensionNode); ok {
		*e = *u
		return nil
	}
	return errors.New("expected extension node")
}
|
88
pkg/core/mpt/hash.go
Normal file
88
pkg/core/mpt/hash.go
Normal file
|
@ -0,0 +1,88 @@
|
|||
package mpt

import (
	"errors"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// HashNode represents MPT's hash node: a compressed stand-in that carries
// only the hash of the node it replaces. An "empty" hash node (no valid
// hash) represents the absence of a node.
type HashNode struct {
	BaseNode
}

var _ Node = (*HashNode)(nil)

// NewHashNode returns hash node with the specified hash.
func NewHashNode(h util.Uint256) *HashNode {
	return &HashNode{
		BaseNode: BaseNode{
			hash:      h,
			hashValid: true,
		},
	}
}

// Type implements Node interface.
func (h *HashNode) Type() NodeType { return HashT }

// Hash implements Node interface.
// It panics for an empty hash node, which has no hash by definition.
func (h *HashNode) Hash() util.Uint256 {
	if !h.hashValid {
		panic("can't get hash of an empty HashNode")
	}
	return h.hash
}

// IsEmpty returns true if h is an empty node i.e. contains no hash.
func (h *HashNode) IsEmpty() bool { return !h.hashValid }

// Bytes returns serialized HashNode.
func (h *HashNode) Bytes() []byte {
	return h.getBytes(h)
}

// DecodeBinary implements io.Serializable.
// Wire format is a var-length byte string: 0 bytes for an empty node,
// exactly util.Uint256Size bytes for a hash; anything else is an error.
func (h *HashNode) DecodeBinary(r *io.BinReader) {
	sz := r.ReadVarUint()
	switch sz {
	case 0:
		h.hashValid = false
	case util.Uint256Size:
		h.hashValid = true
		r.ReadBytes(h.hash[:])
	default:
		r.Err = fmt.Errorf("invalid hash node size: %d", sz)
	}
}

// EncodeBinary implements io.Serializable.
func (h HashNode) EncodeBinary(w *io.BinWriter) {
	if !h.hashValid {
		w.WriteVarUint(0)
		return
	}
	w.WriteVarBytes(h.hash[:])
}

// MarshalJSON implements json.Marshaler.
// An empty hash node marshals to the empty JSON object.
func (h *HashNode) MarshalJSON() ([]byte, error) {
	if !h.hashValid {
		return []byte(`{}`), nil
	}
	return []byte(`{"hash":"` + h.hash.StringLE() + `"}`), nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (h *HashNode) UnmarshalJSON(data []byte) error {
	var obj NodeObject
	if err := obj.UnmarshalJSON(data); err != nil {
		return err
	} else if u, ok := obj.Node.(*HashNode); ok {
		*h = *u
		return nil
	}
	return errors.New("expected hash node")
}
|
86
pkg/core/mpt/helpers.go
Normal file
86
pkg/core/mpt/helpers.go
Normal file
|
@ -0,0 +1,86 @@
|
|||
package mpt
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// lcp returns the longest common prefix of a and b as a subslice of the
// longer argument. Note: it does no allocations.
func lcp(a, b []byte) []byte {
	if len(a) < len(b) {
		a, b = b, a
	}

	i := 0
	for ; i < len(b) && a[i] == b[i]; i++ {
	}

	return a[:i]
}
|
||||
|
||||
// copySlice returns a freshly allocated copy of a, detached from its
// backing array.
func copySlice(a []byte) []byte {
	dup := make([]byte, len(a))
	copy(dup, a)
	return dup
}
|
||||
|
||||
// toNibbles mangles path by splitting every byte into two nibbles,
// high half first, doubling the length.
func toNibbles(path []byte) []byte {
	nibbles := make([]byte, 0, len(path)*2)
	for _, b := range path {
		nibbles = append(nibbles, b>>4, b&0x0F)
	}
	return nibbles
}
|
||||
|
||||
// ToNeoStorageKey converts storage key to C# neo node's format.
|
||||
// Key is expected to be at least 20 bytes in length.
|
||||
// our format: script hash in BE + key
|
||||
// neo format: script hash in LE + key with 0 between every 16 bytes, padded to len 16.
|
||||
func ToNeoStorageKey(key []byte) []byte {
|
||||
const groupSize = 16
|
||||
|
||||
var nkey []byte
|
||||
for i := util.Uint160Size - 1; i >= 0; i-- {
|
||||
nkey = append(nkey, key[i])
|
||||
}
|
||||
|
||||
key = key[util.Uint160Size:]
|
||||
|
||||
index := 0
|
||||
remain := len(key)
|
||||
for remain >= groupSize {
|
||||
nkey = append(nkey, key[index:index+groupSize]...)
|
||||
nkey = append(nkey, 0)
|
||||
index += groupSize
|
||||
remain -= groupSize
|
||||
}
|
||||
|
||||
if remain > 0 {
|
||||
nkey = append(nkey, key[index:]...)
|
||||
}
|
||||
|
||||
padding := groupSize - remain
|
||||
for i := 0; i < padding; i++ {
|
||||
nkey = append(nkey, 0)
|
||||
}
|
||||
return append(nkey, byte(padding))
|
||||
}
|
||||
|
||||
// ToNeoStorageValue serializes si to a C# neo node's format.
// It has additional version (0x00) byte at the beginning.
func ToNeoStorageValue(si *state.StorageItem) []byte {
	const version = 0

	buf := io.NewBufBinWriter()
	buf.BinWriter.WriteB(version)
	si.EncodeBinary(buf.BinWriter)
	return buf.Bytes()
}
|
30
pkg/core/mpt/helpers_test.go
Normal file
30
pkg/core/mpt/helpers_test.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package mpt

import (
	"encoding/hex"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestToNeoStorageKey checks key conversion against fixed vectors:
// a key that is exactly one group, one with a short remainder and one
// spanning multiple groups. The last byte of each expected value is the
// padding length.
func TestToNeoStorageKey(t *testing.T) {
	testCases := []struct{ key, res string }{
		{
			"0102030405060708091011121314151617181920",
			"20191817161514131211100908070605040302010000000000000000000000000000000010",
		},
		{
			"010203040506070809101112131415161718192021222324",
			"2019181716151413121110090807060504030201212223240000000000000000000000000c",
		},
		{
			"0102030405060708091011121314151617181920212223242526272829303132333435363738",
			"20191817161514131211100908070605040302012122232425262728293031323334353600373800000000000000000000000000000e",
		},
	}
	for _, tc := range testCases {
		key, _ := hex.DecodeString(tc.key)
		res, _ := hex.DecodeString(tc.res)
		require.Equal(t, res, ToNeoStorageKey(key))
	}
}
|
73
pkg/core/mpt/leaf.go
Normal file
73
pkg/core/mpt/leaf.go
Normal file
|
@ -0,0 +1,73 @@
|
|||
package mpt

import (
	"encoding/hex"
	"errors"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// MaxValueLength is a max length of a leaf node value.
const MaxValueLength = 1024 * 1024

// LeafNode represents MPT's leaf node.
type LeafNode struct {
	BaseNode
	value []byte
}

var _ Node = (*LeafNode)(nil)

// NewLeafNode returns new leaf node with the specified value.
func NewLeafNode(value []byte) *LeafNode {
	return &LeafNode{value: value}
}

// Type implements Node interface.
func (n LeafNode) Type() NodeType { return LeafT }

// Hash implements BaseNode interface.
func (n *LeafNode) Hash() util.Uint256 {
	return n.getHash(n)
}

// Bytes implements BaseNode interface.
func (n *LeafNode) Bytes() []byte {
	return n.getBytes(n)
}

// DecodeBinary implements io.Serializable.
func (n *LeafNode) DecodeBinary(r *io.BinReader) {
	sz := r.ReadVarUint()
	if sz > MaxValueLength {
		r.Err = fmt.Errorf("leaf node value is too big: %d", sz)
		return
	}
	n.value = make([]byte, sz)
	r.ReadBytes(n.value)
	n.invalidateCache()
}

// EncodeBinary implements io.Serializable.
func (n LeafNode) EncodeBinary(w *io.BinWriter) {
	w.WriteVarBytes(n.value)
}

// MarshalJSON implements json.Marshaler.
func (n *LeafNode) MarshalJSON() ([]byte, error) {
	return []byte(`{"value":"` + hex.EncodeToString(n.value) + `"}`), nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (n *LeafNode) UnmarshalJSON(data []byte) error {
	var obj NodeObject
	if err := obj.UnmarshalJSON(data); err != nil {
		return err
	} else if u, ok := obj.Node.(*LeafNode); ok {
		*n = *u
		return nil
	}
	return errors.New("expected leaf node")
}
|
119
pkg/core/mpt/node.go
Normal file
119
pkg/core/mpt/node.go
Normal file
|
@ -0,0 +1,119 @@
|
|||
package mpt

import (
	"encoding/hex"
	"encoding/json"
	"errors"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// NodeType represents node type.
type NodeType byte

// Node types definitions.
const (
	BranchT    NodeType = 0x00
	ExtensionT NodeType = 0x01
	HashT      NodeType = 0x02
	LeafT      NodeType = 0x03
)

// NodeObject represents Node together with it's type.
// It is used for serialization/deserialization where type info
// is also expected.
type NodeObject struct {
	Node
}

// Node represents common interface of all MPT nodes.
type Node interface {
	io.Serializable
	json.Marshaler
	json.Unmarshaler
	BaseNodeIface
}

// EncodeBinary implements io.Serializable.
func (n NodeObject) EncodeBinary(w *io.BinWriter) {
	encodeNodeWithType(n.Node, w)
}

// DecodeBinary implements io.Serializable.
func (n *NodeObject) DecodeBinary(r *io.BinReader) {
	n.Node = DecodeNodeWithType(r)
}

// UnmarshalJSON implements json.Unmarshaler.
// The concrete node kind is inferred from the JSON shape: an array is a
// branch, an empty object is an empty hash node, a single "hash" or
// "value" field is a hash/leaf node, and "key"+"next" is an extension.
func (n *NodeObject) UnmarshalJSON(data []byte) error {
	var m map[string]json.RawMessage
	err := json.Unmarshal(data, &m)
	if err != nil { // it can be a branch node (JSON array, not object)
		var nodes []NodeObject
		if err := json.Unmarshal(data, &nodes); err != nil {
			return err
		} else if len(nodes) != childrenCount {
			return errors.New("invalid length of branch node")
		}

		b := NewBranchNode()
		for i := range b.Children {
			b.Children[i] = nodes[i].Node
		}
		n.Node = b
		return nil
	}

	switch len(m) {
	case 0:
		// Empty object marshals from an empty hash node.
		n.Node = new(HashNode)
	case 1:
		if v, ok := m["hash"]; ok {
			var h util.Uint256
			if err := json.Unmarshal(v, &h); err != nil {
				return err
			}
			n.Node = NewHashNode(h)
		} else if v, ok = m["value"]; ok {
			b, err := unmarshalHex(v)
			if err != nil {
				return err
			} else if len(b) > MaxValueLength {
				return errors.New("leaf value is too big")
			}
			n.Node = NewLeafNode(b)
		} else {
			return errors.New("invalid field")
		}
	case 2:
		keyRaw, ok1 := m["key"]
		nextRaw, ok2 := m["next"]
		if !ok1 || !ok2 {
			return errors.New("invalid field")
		}
		key, err := unmarshalHex(keyRaw)
		if err != nil {
			return err
		} else if len(key) > MaxKeyLength {
			return errors.New("extension key is too big")
		}

		var next NodeObject
		if err := json.Unmarshal(nextRaw, &next); err != nil {
			return err
		}
		n.Node = NewExtensionNode(key, next.Node)
	default:
		return errors.New("0, 1 or 2 fields expected")
	}
	return nil
}
|
||||
|
||||
func unmarshalHex(data json.RawMessage) ([]byte, error) {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hex.DecodeString(s)
|
||||
}
|
156
pkg/core/mpt/node_test.go
Normal file
156
pkg/core/mpt/node_test.go
Normal file
|
@ -0,0 +1,156 @@
|
|||
package mpt

import (
	"encoding/json"
	"testing"

	"github.com/nspcc-dev/neo-go/pkg/internal/random"
	"github.com/nspcc-dev/neo-go/pkg/internal/testserdes"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// getTestFuncEncode returns a subtest that round-trips expected through
// both binary and JSON serialization into actual. When ok is false the
// round-trip is expected to fail instead.
func getTestFuncEncode(ok bool, expected, actual Node) func(t *testing.T) {
	return func(t *testing.T) {
		t.Run("IO", func(t *testing.T) {
			bs, err := testserdes.EncodeBinary(expected)
			require.NoError(t, err)
			err = testserdes.DecodeBinary(bs, actual)
			if !ok {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, expected.Type(), actual.Type())
			require.Equal(t, expected.Hash(), actual.Hash())
		})
		t.Run("JSON", func(t *testing.T) {
			bs, err := json.Marshal(expected)
			require.NoError(t, err)
			err = json.Unmarshal(bs, actual)
			if !ok {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, expected.Type(), actual.Type())
			require.Equal(t, expected.Hash(), actual.Hash())
		})
	}
}

// TestNode_Serializable covers round-trips of every node kind, both raw
// and wrapped in NodeObject, plus size-limit and invalid-type failures.
func TestNode_Serializable(t *testing.T) {
	t.Run("Leaf", func(t *testing.T) {
		t.Run("Good", func(t *testing.T) {
			l := NewLeafNode(random.Bytes(123))
			t.Run("Raw", getTestFuncEncode(true, l, new(LeafNode)))
			t.Run("WithType", getTestFuncEncode(true, &NodeObject{l}, new(NodeObject)))
		})
		t.Run("BigValue", getTestFuncEncode(false,
			NewLeafNode(random.Bytes(MaxValueLength+1)), new(LeafNode)))
	})

	t.Run("Extension", func(t *testing.T) {
		t.Run("Good", func(t *testing.T) {
			e := NewExtensionNode(random.Bytes(42), NewLeafNode(random.Bytes(10)))
			t.Run("Raw", getTestFuncEncode(true, e, new(ExtensionNode)))
			t.Run("WithType", getTestFuncEncode(true, &NodeObject{e}, new(NodeObject)))
		})
		t.Run("BigKey", getTestFuncEncode(false,
			NewExtensionNode(random.Bytes(MaxKeyLength+1), NewLeafNode(random.Bytes(10))), new(ExtensionNode)))
	})

	t.Run("Branch", func(t *testing.T) {
		b := NewBranchNode()
		b.Children[0] = NewLeafNode(random.Bytes(10))
		b.Children[lastChild] = NewHashNode(random.Uint256())
		t.Run("Raw", getTestFuncEncode(true, b, new(BranchNode)))
		t.Run("WithType", getTestFuncEncode(true, &NodeObject{b}, new(NodeObject)))
	})

	t.Run("Hash", func(t *testing.T) {
		t.Run("Good", func(t *testing.T) {
			h := NewHashNode(random.Uint256())
			t.Run("Raw", getTestFuncEncode(true, h, new(HashNode)))
			t.Run("WithType", getTestFuncEncode(true, &NodeObject{h}, new(NodeObject)))
		})
		t.Run("Empty", func(t *testing.T) { // compare nodes, not hashes
			testserdes.EncodeDecodeBinary(t, new(HashNode), new(HashNode))
		})
		t.Run("InvalidSize", func(t *testing.T) {
			buf := io.NewBufBinWriter()
			buf.BinWriter.WriteVarBytes(make([]byte, 13))
			require.Error(t, testserdes.DecodeBinary(buf.Bytes(), new(HashNode)))
		})
	})

	t.Run("Invalid", func(t *testing.T) {
		require.Error(t, testserdes.DecodeBinary([]byte{0xFF}, new(NodeObject)))
	})
}

// https://github.com/neo-project/neo/blob/neox-2.x/neo.UnitTests/UT_MPTTrie.cs#L198
func TestJSONSharp(t *testing.T) {
	tr := NewTrie(nil, false, newTestStore())
	require.NoError(t, tr.Put([]byte{0xac, 0x11}, []byte{0xac, 0x11}))
	require.NoError(t, tr.Put([]byte{0xac, 0x22}, []byte{0xac, 0x22}))
	require.NoError(t, tr.Put([]byte{0xac}, []byte{0xac}))
	require.NoError(t, tr.Delete([]byte{0xac, 0x11}))
	require.NoError(t, tr.Delete([]byte{0xac, 0x22}))

	js, err := tr.root.MarshalJSON()
	require.NoError(t, err)
	require.JSONEq(t, `{"key":"0a0c", "next":{"value":"ac"}}`, string(js))
}

// TestInvalidJSON feeds malformed node JSON to NodeObject.UnmarshalJSON
// and expects every case to be rejected.
func TestInvalidJSON(t *testing.T) {
	t.Run("InvalidChildrenCount", func(t *testing.T) {
		var cs [childrenCount + 1]Node
		for i := range cs {
			cs[i] = new(HashNode)
		}
		data, err := json.Marshal(cs)
		require.NoError(t, err)

		var n NodeObject
		require.Error(t, json.Unmarshal(data, &n))
	})

	testCases := []struct {
		name string
		data []byte
	}{
		{"WrongFieldCount", []byte(`{"key":"0102", "next": {}, "field": {}}`)},
		{"InvalidField1", []byte(`{"next":{}}`)},
		{"InvalidField2", []byte(`{"key":"0102", "hash":{}}`)},
		{"InvalidKey", []byte(`{"key":"xy", "next":{}}`)},
		{"InvalidNext", []byte(`{"key":"01", "next":[]}`)},
		{"InvalidHash", []byte(`{"hash":"01"}`)},
		{"InvalidValue", []byte(`{"value":1}`)},
		{"InvalidBranch", []byte(`[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]`)},
	}
	for _, tc := range testCases {
		var n NodeObject
		assert.Errorf(t, json.Unmarshal(tc.data, &n), "no error in "+tc.name)
	}
}

// C# interoperability test
// https://github.com/neo-project/neo/blob/neox-2.x/neo.UnitTests/UT_MPTTrie.cs#L135
func TestRootHash(t *testing.T) {
	b := NewBranchNode()
	r := NewExtensionNode([]byte{0x0A, 0x0C}, b)

	v1 := NewLeafNode([]byte{0xAB, 0xCD})
	l1 := NewExtensionNode([]byte{0x01}, v1)
	b.Children[0] = l1

	v2 := NewLeafNode([]byte{0x22, 0x22})
	l2 := NewExtensionNode([]byte{0x09}, v2)
	b.Children[9] = l2

	r1 := NewExtensionNode([]byte{0x0A, 0x0C, 0x00, 0x01}, v1)
	require.Equal(t, "dea3ab46e9461e885ed7091c1e533e0a8030b248d39cbc638962394eaca0fbb3", r1.Hash().StringLE())
	require.Equal(t, "93e8e1ffe2f83dd92fca67330e273bcc811bf64b8f8d9d1b25d5e7366b47d60d", r.Hash().StringLE())
}
|
74
pkg/core/mpt/proof.go
Normal file
74
pkg/core/mpt/proof.go
Normal file
|
@ -0,0 +1,74 @@
|
|||
package mpt

import (
	"bytes"

	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// GetProof returns a proof that key belongs to t.
// Proof consists of serialized nodes occurring on path from the root to the leaf of key.
func (t *Trie) GetProof(key []byte) ([][]byte, error) {
	var proof [][]byte
	path := toNibbles(key)
	r, err := t.getProof(t.root, path, &proof)
	if err != nil {
		return proof, err
	}
	// Keep the uncompressed nodes collected along the way.
	t.root = r
	return proof, nil
}

// getProof recursively descends curr along path, appending a copy of every
// visited node's serialized form to proofs. It returns the (possibly
// uncompressed) replacement for curr, or ErrNotFound when path is absent.
func (t *Trie) getProof(curr Node, path []byte, proofs *[][]byte) (Node, error) {
	switch n := curr.(type) {
	case *LeafNode:
		if len(path) == 0 {
			*proofs = append(*proofs, copySlice(n.Bytes()))
			return n, nil
		}
	case *BranchNode:
		*proofs = append(*proofs, copySlice(n.Bytes()))
		i, path := splitPath(path)
		r, err := t.getProof(n.Children[i], path, proofs)
		if err != nil {
			return nil, err
		}
		n.Children[i] = r
		return n, nil
	case *ExtensionNode:
		if bytes.HasPrefix(path, n.key) {
			*proofs = append(*proofs, copySlice(n.Bytes()))
			r, err := t.getProof(n.next, path[len(n.key):], proofs)
			if err != nil {
				return nil, err
			}
			n.next = r
			return n, nil
		}
	case *HashNode:
		if !n.IsEmpty() {
			// Resolve the compressed node from storage and retry.
			r, err := t.getFromStore(n.Hash())
			if err != nil {
				return nil, err
			}
			return t.getProof(r, path, proofs)
		}
	}
	return nil, ErrNotFound
}

// VerifyProof verifies that path indeed belongs to a MPT with the specified root hash.
// It also returns value for the key.
func VerifyProof(rh util.Uint256, key []byte, proofs [][]byte) ([]byte, bool) {
	path := toNibbles(key)
	// Build a throwaway trie backed by the proof nodes only; the lookup
	// succeeds iff every node on the path hashes consistently to rh.
	tr := NewTrie(NewHashNode(rh), false, storage.NewMemCachedStore(storage.NewMemoryStore()))
	for i := range proofs {
		h := hash.DoubleSha256(proofs[i])
		// no errors in Put to memory store
		_ = tr.Store.Put(makeStorageKey(h[:]), proofs[i])
	}
	_, bs, err := tr.getWithPath(tr.root, path)
	return bs, err == nil
}
|
73
pkg/core/mpt/proof_test.go
Normal file
73
pkg/core/mpt/proof_test.go
Normal file
|
@ -0,0 +1,73 @@
|
|||
package mpt

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// newProofTrie builds a small trie that mixes inline nodes with nodes
// reachable only through the backing store (l and e are stored but only
// referenced by hash), so proof paths of both kinds can be exercised.
func newProofTrie(t *testing.T) *Trie {
	l := NewLeafNode([]byte("somevalue"))
	e := NewExtensionNode([]byte{0x05, 0x06, 0x07}, l)
	l2 := NewLeafNode([]byte("invalid"))
	// e2's next node is deliberately NOT put to the store.
	e2 := NewExtensionNode([]byte{0x05}, NewHashNode(l2.Hash()))
	b := NewBranchNode()
	b.Children[4] = NewHashNode(e.Hash())
	b.Children[5] = e2

	tr := NewTrie(b, false, newTestStore())
	require.NoError(t, tr.Put([]byte{0x12, 0x31}, []byte("value1")))
	require.NoError(t, tr.Put([]byte{0x12, 0x32}, []byte("value2")))
	tr.putToStore(l)
	tr.putToStore(e)
	return tr
}

func TestTrie_GetProof(t *testing.T) {
	tr := newProofTrie(t)

	t.Run("MissingKey", func(t *testing.T) {
		_, err := tr.GetProof([]byte{0x12})
		require.Error(t, err)
	})

	t.Run("Valid", func(t *testing.T) {
		_, err := tr.GetProof([]byte{0x12, 0x31})
		require.NoError(t, err)
	})

	t.Run("MissingHashNode", func(t *testing.T) {
		_, err := tr.GetProof([]byte{0x55})
		require.Error(t, err)
	})
}

func TestVerifyProof(t *testing.T) {
	tr := newProofTrie(t)

	t.Run("Simple", func(t *testing.T) {
		proof, err := tr.GetProof([]byte{0x12, 0x32})
		require.NoError(t, err)

		t.Run("Good", func(t *testing.T) {
			v, ok := VerifyProof(tr.root.Hash(), []byte{0x12, 0x32}, proof)
			require.True(t, ok)
			require.Equal(t, []byte("value2"), v)
		})

		// A proof for one key must not verify a sibling key.
		t.Run("Bad", func(t *testing.T) {
			_, ok := VerifyProof(tr.root.Hash(), []byte{0x12, 0x31}, proof)
			require.False(t, ok)
		})
	})

	t.Run("InsideHash", func(t *testing.T) {
		key := []byte{0x45, 0x67}
		proof, err := tr.GetProof(key)
		require.NoError(t, err)

		v, ok := VerifyProof(tr.root.Hash(), key, proof)
		require.True(t, ok)
		require.Equal(t, []byte("somevalue"), v)
	})
}
|
504
pkg/core/mpt/trie.go
Normal file
504
pkg/core/mpt/trie.go
Normal file
|
@ -0,0 +1,504 @@
|
|||
package mpt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// Trie is an MPT trie storing all key-value pairs.
|
||||
type Trie struct {
|
||||
Store *storage.MemCachedStore
|
||||
|
||||
root Node
|
||||
refcountEnabled bool
|
||||
refcount map[util.Uint256]*cachedNode
|
||||
}
|
||||
|
||||
type cachedNode struct {
|
||||
bytes []byte
|
||||
initial int32
|
||||
refcount int32
|
||||
}
|
||||
|
||||
// ErrNotFound is returned when requested trie item is missing.
|
||||
var ErrNotFound = errors.New("item not found")
|
||||
|
||||
// NewTrie returns new MPT trie. It accepts a MemCachedStore to decouple storage errors from logic errors
|
||||
// so that all storage errors are processed during `store.Persist()` at the caller.
|
||||
// This also has the benefit, that every `Put` can be considered an atomic operation.
|
||||
func NewTrie(root Node, enableRefCount bool, store *storage.MemCachedStore) *Trie {
|
||||
if root == nil {
|
||||
root = new(HashNode)
|
||||
}
|
||||
|
||||
return &Trie{
|
||||
Store: store,
|
||||
root: root,
|
||||
|
||||
refcountEnabled: enableRefCount,
|
||||
refcount: make(map[util.Uint256]*cachedNode),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns value for the provided key in t.
|
||||
func (t *Trie) Get(key []byte) ([]byte, error) {
|
||||
path := toNibbles(key)
|
||||
r, bs, err := t.getWithPath(t.root, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.root = r
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
// getWithPath returns value the provided path in a subtrie rooting in curr.
|
||||
// It also returns a current node with all hash nodes along the path
|
||||
// replaced to their "unhashed" counterparts.
|
||||
func (t *Trie) getWithPath(curr Node, path []byte) (Node, []byte, error) {
|
||||
switch n := curr.(type) {
|
||||
case *LeafNode:
|
||||
if len(path) == 0 {
|
||||
return curr, copySlice(n.value), nil
|
||||
}
|
||||
case *BranchNode:
|
||||
i, path := splitPath(path)
|
||||
r, bs, err := t.getWithPath(n.Children[i], path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
n.Children[i] = r
|
||||
return n, bs, nil
|
||||
case *HashNode:
|
||||
if !n.IsEmpty() {
|
||||
if r, err := t.getFromStore(n.hash); err == nil {
|
||||
return t.getWithPath(r, path)
|
||||
}
|
||||
}
|
||||
case *ExtensionNode:
|
||||
if bytes.HasPrefix(path, n.key) {
|
||||
r, bs, err := t.getWithPath(n.next, path[len(n.key):])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
n.next = r
|
||||
return curr, bs, err
|
||||
}
|
||||
default:
|
||||
panic("invalid MPT node type")
|
||||
}
|
||||
return curr, nil, ErrNotFound
|
||||
}
|
||||
|
||||
// Put puts key-value pair in t.
|
||||
func (t *Trie) Put(key, value []byte) error {
|
||||
if len(key) > MaxKeyLength {
|
||||
return errors.New("key is too big")
|
||||
} else if len(value) > MaxValueLength {
|
||||
return errors.New("value is too big")
|
||||
}
|
||||
if len(value) == 0 {
|
||||
return t.Delete(key)
|
||||
}
|
||||
path := toNibbles(key)
|
||||
n := NewLeafNode(value)
|
||||
r, err := t.putIntoNode(t.root, path, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.root = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// putIntoLeaf puts val to trie if current node is a Leaf.
// It returns Node if curr needs to be replaced and error if any.
func (t *Trie) putIntoLeaf(curr *LeafNode, path []byte, val Node) (Node, error) {
	v := val.(*LeafNode)
	if len(path) == 0 {
		// Exact key match: replace the old leaf's value.
		t.removeRef(curr.Hash(), curr.bytes)
		t.addRef(val.Hash(), val.Bytes())
		return v, nil
	}

	// Keys diverge: turn the leaf into a branch, keeping the old value in
	// the last (value) child and putting the new one into a fresh subtrie.
	b := NewBranchNode()
	b.Children[path[0]] = t.newSubTrie(path[1:], v, true)
	b.Children[lastChild] = curr
	t.addRef(b.Hash(), b.bytes)
	return b, nil
}
|
||||
|
||||
// putIntoBranch puts val to trie if current node is a Branch.
// It returns Node if curr needs to be replaced and error if any.
func (t *Trie) putIntoBranch(curr *BranchNode, path []byte, val Node) (Node, error) {
	i, path := splitPath(path)
	// NOTE(review): the ref is removed before the recursive put, so on error
	// the counter stays decremented — confirm callers treat a failed Put as
	// leaving the trie inconsistent (cf. deleteFromBranch which defers this).
	t.removeRef(curr.Hash(), curr.bytes)
	r, err := t.putIntoNode(curr.Children[i], path, val)
	if err != nil {
		return nil, err
	}
	curr.Children[i] = r
	// The child changed, so the cached hash/bytes of the branch are stale.
	curr.invalidateCache()
	t.addRef(curr.Hash(), curr.bytes)
	return curr, nil
}
|
||||
|
||||
// putIntoExtension puts val to trie if current node is an Extension.
// It returns Node if curr needs to be replaced and error if any.
func (t *Trie) putIntoExtension(curr *ExtensionNode, path []byte, val Node) (Node, error) {
	t.removeRef(curr.Hash(), curr.bytes)
	if bytes.HasPrefix(path, curr.key) {
		// Path goes through this extension: descend into its child.
		r, err := t.putIntoNode(curr.next, path[len(curr.key):], val)
		if err != nil {
			return nil, err
		}
		curr.next = r
		curr.invalidateCache()
		t.addRef(curr.Hash(), curr.bytes)
		return curr, nil
	}

	// Paths diverge: split at the longest common prefix.
	pref := lcp(curr.key, path)
	lp := len(pref)
	keyTail := curr.key[lp:]
	pathTail := path[lp:]

	// The old extension's remainder becomes one branch child
	// (keyTail is non-empty here, otherwise HasPrefix above would match)...
	s1 := t.newSubTrie(keyTail[1:], curr.next, false)
	b := NewBranchNode()
	b.Children[keyTail[0]] = s1

	// ...and the new value goes into a subtrie under another child.
	i, pathTail := splitPath(pathTail)
	s2 := t.newSubTrie(pathTail, val, true)
	b.Children[i] = s2

	t.addRef(b.Hash(), b.bytes)
	if lp > 0 {
		// A non-empty common prefix is kept in an extension above the branch.
		e := NewExtensionNode(copySlice(pref), b)
		t.addRef(e.Hash(), e.bytes)
		return e, nil
	}
	return b, nil
}
|
||||
|
||||
// putIntoHash puts val to trie if current node is a HashNode.
|
||||
// It returns Node if curr needs to be replaced and error if any.
|
||||
func (t *Trie) putIntoHash(curr *HashNode, path []byte, val Node) (Node, error) {
|
||||
if curr.IsEmpty() {
|
||||
hn := t.newSubTrie(path, val, true)
|
||||
return hn, nil
|
||||
}
|
||||
|
||||
result, err := t.getFromStore(curr.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t.putIntoNode(result, path, val)
|
||||
}
|
||||
|
||||
// newSubTrie create new trie containing node at provided path.
|
||||
func (t *Trie) newSubTrie(path []byte, val Node, newVal bool) Node {
|
||||
if newVal {
|
||||
t.addRef(val.Hash(), val.Bytes())
|
||||
}
|
||||
if len(path) == 0 {
|
||||
return val
|
||||
}
|
||||
e := NewExtensionNode(path, val)
|
||||
t.addRef(e.Hash(), e.bytes)
|
||||
return e
|
||||
}
|
||||
|
||||
// putIntoNode puts val with provided path inside curr and returns updated node.
|
||||
// Reference counters are updated for both curr and returned value.
|
||||
func (t *Trie) putIntoNode(curr Node, path []byte, val Node) (Node, error) {
|
||||
switch n := curr.(type) {
|
||||
case *LeafNode:
|
||||
return t.putIntoLeaf(n, path, val)
|
||||
case *BranchNode:
|
||||
return t.putIntoBranch(n, path, val)
|
||||
case *ExtensionNode:
|
||||
return t.putIntoExtension(n, path, val)
|
||||
case *HashNode:
|
||||
return t.putIntoHash(n, path, val)
|
||||
default:
|
||||
panic("invalid MPT node type")
|
||||
}
|
||||
}
|
||||
|
||||
// Delete removes key from trie.
|
||||
// It returns no error on missing key.
|
||||
func (t *Trie) Delete(key []byte) error {
|
||||
path := toNibbles(key)
|
||||
r, err := t.deleteFromNode(t.root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.root = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteFromBranch removes the value with the given path from the subtrie
// rooted at branch node b, collapsing the branch when a single child remains.
func (t *Trie) deleteFromBranch(b *BranchNode, path []byte) (Node, error) {
	i, path := splitPath(path)
	// Remember hash/serialized form before the child update invalidates them.
	h := b.Hash()
	bs := b.bytes
	r, err := t.deleteFromNode(b.Children[i], path)
	if err != nil {
		return nil, err
	}
	t.removeRef(h, bs)
	b.Children[i] = r
	b.invalidateCache()
	// Count non-empty children left; remember the index of the last one seen.
	var count, index int
	for i := range b.Children {
		h, ok := b.Children[i].(*HashNode)
		if !ok || !h.IsEmpty() {
			index = i
			count++
		}
	}
	// count is >= 1 because branch node had at least 2 children before deletion.
	if count > 1 {
		t.addRef(b.Hash(), b.bytes)
		return b, nil
	}
	// A single child remains: collapse the branch into it.
	c := b.Children[index]
	if index == lastChild {
		// The remaining child is the branch's own value — return it as is.
		return c, nil
	}
	// Resolve a hash-only child so it can be merged with an extension below.
	if h, ok := c.(*HashNode); ok {
		c, err = t.getFromStore(h.Hash())
		if err != nil {
			return nil, err
		}
	}
	if e, ok := c.(*ExtensionNode); ok {
		// Prepend the child's index nibble to the existing extension key.
		t.removeRef(e.Hash(), e.bytes)
		e.key = append([]byte{byte(index)}, e.key...)
		e.invalidateCache()
		t.addRef(e.Hash(), e.bytes)
		return e, nil
	}

	// Wrap the remaining child into a one-nibble extension.
	e := NewExtensionNode([]byte{byte(index)}, c)
	t.addRef(e.Hash(), e.bytes)
	return e, nil
}
|
||||
|
||||
// deleteFromExtension removes the value with the given path from the subtrie
// rooted at extension node n, merging n with its updated child when possible.
func (t *Trie) deleteFromExtension(n *ExtensionNode, path []byte) (Node, error) {
	if !bytes.HasPrefix(path, n.key) {
		return nil, ErrNotFound
	}
	// Remember hash/serialized form before the child update invalidates them.
	h := n.Hash()
	bs := n.bytes
	r, err := t.deleteFromNode(n.next, path[len(n.key):])
	if err != nil {
		return nil, err
	}
	t.removeRef(h, bs)
	switch nxt := r.(type) {
	case *ExtensionNode:
		// Two consecutive extensions are merged into one.
		t.removeRef(nxt.Hash(), nxt.bytes)
		n.key = append(n.key, nxt.key...)
		n.next = nxt.next
	case *HashNode:
		if nxt.IsEmpty() {
			// The whole subtrie vanished, so does this extension.
			return nxt, nil
		}
	default:
		n.next = r
	}
	n.invalidateCache()
	t.addRef(n.Hash(), n.bytes)
	return n, nil
}
|
||||
|
||||
// deleteFromNode removes value with provided path from curr and returns an updated node.
// Reference counters are updated for both curr and returned value.
func (t *Trie) deleteFromNode(curr Node, path []byte) (Node, error) {
	switch n := curr.(type) {
	case *LeafNode:
		if len(path) == 0 {
			t.removeRef(curr.Hash(), curr.Bytes())
			// An empty hash node marks the deleted subtrie.
			return new(HashNode), nil
		}
		return nil, ErrNotFound
	case *BranchNode:
		return t.deleteFromBranch(n, path)
	case *ExtensionNode:
		return t.deleteFromExtension(n, path)
	case *HashNode:
		if n.IsEmpty() {
			return nil, ErrNotFound
		}
		// Resolve the real node from the store and retry.
		newNode, err := t.getFromStore(n.Hash())
		if err != nil {
			return nil, err
		}
		return t.deleteFromNode(newNode, path)
	default:
		panic("invalid MPT node type")
	}
}
|
||||
|
||||
// StateRoot returns root hash of t.
|
||||
func (t *Trie) StateRoot() util.Uint256 {
|
||||
if hn, ok := t.root.(*HashNode); ok && hn.IsEmpty() {
|
||||
return util.Uint256{}
|
||||
}
|
||||
return t.root.Hash()
|
||||
}
|
||||
|
||||
func makeStorageKey(mptKey []byte) []byte {
|
||||
return append([]byte{byte(storage.DataMPT)}, mptKey...)
|
||||
}
|
||||
|
||||
// Flush puts every node in the trie except Hash ones to the storage.
// Because we care only about block-level changes, there is no need to put every
// new node to storage. Normally, flush should be called with every StateRoot persist, i.e.
// after every block.
func (t *Trie) Flush() {
	for h, node := range t.refcount {
		if node.refcount != 0 {
			if node.bytes == nil {
				// A node with a non-zero refcount delta must have been seen
				// serialized; anything else indicates trie corruption.
				panic("item not in trie")
			}
			if t.refcountEnabled {
				node.initial = t.updateRefCount(h)
				if node.initial == 0 {
					// Fully dereferenced node, stop tracking it.
					delete(t.refcount, h)
				}
			} else if node.refcount > 0 {
				// Without refcounting only net additions are persisted;
				// net removals are simply dropped.
				_ = t.Store.Put(makeStorageKey(h.BytesBE()), node.bytes)
			}
			node.refcount = 0
		} else {
			// Net-zero change since the last flush, drop the cache entry.
			delete(t.refcount, h)
		}
	}
}
|
||||
|
||||
// updateRefCount should be called only when refcounting is enabled.
|
||||
func (t *Trie) updateRefCount(h util.Uint256) int32 {
|
||||
if !t.refcountEnabled {
|
||||
panic("`updateRefCount` is called, but GC is disabled")
|
||||
}
|
||||
var data []byte
|
||||
key := makeStorageKey(h.BytesBE())
|
||||
node := t.refcount[h]
|
||||
cnt := node.initial
|
||||
if cnt == 0 {
|
||||
// A newly created item which may be in store.
|
||||
var err error
|
||||
data, err = t.Store.Get(key)
|
||||
if err == nil {
|
||||
cnt = int32(binary.LittleEndian.Uint32(data[len(data)-4:]))
|
||||
}
|
||||
}
|
||||
if len(data) == 0 {
|
||||
data = append(node.bytes, 0, 0, 0, 0)
|
||||
}
|
||||
cnt += node.refcount
|
||||
switch {
|
||||
case cnt < 0:
|
||||
// BUG: negative reference count
|
||||
panic(fmt.Sprintf("negative reference count: %s new %d, upd %d", h.StringBE(), cnt, t.refcount[h]))
|
||||
case cnt == 0:
|
||||
_ = t.Store.Delete(key)
|
||||
default:
|
||||
binary.LittleEndian.PutUint32(data[len(data)-4:], uint32(cnt))
|
||||
_ = t.Store.Put(key, data)
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func (t *Trie) addRef(h util.Uint256, bs []byte) {
|
||||
node := t.refcount[h]
|
||||
if node == nil {
|
||||
t.refcount[h] = &cachedNode{
|
||||
refcount: 1,
|
||||
bytes: bs,
|
||||
}
|
||||
return
|
||||
}
|
||||
node.refcount++
|
||||
if node.bytes == nil {
|
||||
node.bytes = bs
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Trie) removeRef(h util.Uint256, bs []byte) {
|
||||
node := t.refcount[h]
|
||||
if node == nil {
|
||||
t.refcount[h] = &cachedNode{
|
||||
refcount: -1,
|
||||
bytes: bs,
|
||||
}
|
||||
return
|
||||
}
|
||||
node.refcount--
|
||||
if node.bytes == nil {
|
||||
node.bytes = bs
|
||||
}
|
||||
}
|
||||
|
||||
// getFromStore loads, deserializes and caches the node with hash h from the
// backing store.
func (t *Trie) getFromStore(h util.Uint256) (Node, error) {
	data, err := t.Store.Get(makeStorageKey(h.BytesBE()))
	if err != nil {
		return nil, err
	}

	var n NodeObject
	r := io.NewBinReaderFromBuf(data)
	n.DecodeBinary(r)
	if r.Err != nil {
		return nil, r.Err
	}

	if t.refcountEnabled {
		// With refcounting enabled the last 4 bytes hold the stored
		// reference count; strip them from the node's serialized form.
		data = data[:len(data)-4]
		node := t.refcount[h]
		if node != nil {
			node.bytes = data
			node.initial = int32(r.ReadU32LE())
		}
	}
	n.Node.(flushedNode).setCache(data, h)
	return n.Node, nil
}
|
||||
|
||||
// Collapse compresses all nodes at depth n to the hash nodes.
|
||||
// Note: this function does not perform any kind of storage flushing so
|
||||
// `Flush()` should be called explicitly before invoking function.
|
||||
func (t *Trie) Collapse(depth int) {
|
||||
if depth < 0 {
|
||||
panic("negative depth")
|
||||
}
|
||||
t.root = collapse(depth, t.root)
|
||||
t.refcount = make(map[util.Uint256]*cachedNode)
|
||||
}
|
||||
|
||||
func collapse(depth int, node Node) Node {
|
||||
if _, ok := node.(*HashNode); ok {
|
||||
return node
|
||||
} else if depth == 0 {
|
||||
return NewHashNode(node.Hash())
|
||||
}
|
||||
|
||||
switch n := node.(type) {
|
||||
case *BranchNode:
|
||||
for i := range n.Children {
|
||||
n.Children[i] = collapse(depth-1, n.Children[i])
|
||||
}
|
||||
case *ExtensionNode:
|
||||
n.next = collapse(depth-1, n.next)
|
||||
case *LeafNode:
|
||||
case *HashNode:
|
||||
default:
|
||||
panic("invalid MPT node type")
|
||||
}
|
||||
return node
|
||||
}
|
508
pkg/core/mpt/trie_test.go
Normal file
508
pkg/core/mpt/trie_test.go
Normal file
|
@ -0,0 +1,508 @@
|
|||
package mpt
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/random"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newTestStore() *storage.MemCachedStore {
|
||||
return storage.NewMemCachedStore(storage.NewMemoryStore())
|
||||
}
|
||||
|
||||
// newTestTrie builds a small fixed trie (an extension over a branch with two
// extension/leaf children and one hash-only child) and puts all of its nodes
// into the backing store.
func newTestTrie(t *testing.T) *Trie {
	b := NewBranchNode()

	l1 := NewLeafNode([]byte{0xAB, 0xCD})
	b.Children[0] = NewExtensionNode([]byte{0x01}, l1)

	l2 := NewLeafNode([]byte{0x22, 0x22})
	b.Children[9] = NewExtensionNode([]byte{0x09}, l2)

	// This child references its leaf by hash only; the leaf itself is in store.
	v := NewLeafNode([]byte("hello"))
	h := NewHashNode(v.Hash())
	b.Children[10] = NewExtensionNode([]byte{0x0e}, h)

	e := NewExtensionNode(toNibbles([]byte{0xAC}), b)
	tr := NewTrie(e, false, newTestStore())

	tr.putToStore(e)
	tr.putToStore(b)
	tr.putToStore(l1)
	tr.putToStore(l2)
	tr.putToStore(v)
	tr.putToStore(b.Children[0])
	tr.putToStore(b.Children[9])
	tr.putToStore(b.Children[10])

	return tr
}
|
||||
|
||||
func testTrieRefcount(t *testing.T, key1, key2 []byte) {
|
||||
tr := NewTrie(nil, true, storage.NewMemCachedStore(storage.NewMemoryStore()))
|
||||
require.NoError(t, tr.Put(key1, []byte{1}))
|
||||
tr.Flush()
|
||||
require.NoError(t, tr.Put(key2, []byte{1}))
|
||||
tr.Flush()
|
||||
tr.testHas(t, key1, []byte{1})
|
||||
tr.testHas(t, key2, []byte{1})
|
||||
|
||||
// remove first, keep second
|
||||
require.NoError(t, tr.Delete(key1))
|
||||
tr.Flush()
|
||||
tr.testHas(t, key1, nil)
|
||||
tr.testHas(t, key2, []byte{1})
|
||||
|
||||
// no-op
|
||||
require.NoError(t, tr.Put(key1, []byte{1}))
|
||||
require.NoError(t, tr.Delete(key1))
|
||||
tr.Flush()
|
||||
tr.testHas(t, key1, nil)
|
||||
tr.testHas(t, key2, []byte{1})
|
||||
|
||||
// error on delete, refcount should not be updated
|
||||
require.Error(t, tr.Delete(key1))
|
||||
tr.Flush()
|
||||
tr.testHas(t, key1, nil)
|
||||
tr.testHas(t, key2, []byte{1})
|
||||
}
|
||||
|
||||
func TestTrie_Refcount(t *testing.T) {
|
||||
t.Run("Leaf", func(t *testing.T) {
|
||||
testTrieRefcount(t, []byte{0x11}, []byte{0x12})
|
||||
})
|
||||
t.Run("Extension", func(t *testing.T) {
|
||||
testTrieRefcount(t, []byte{0x10, 11}, []byte{0x11, 12})
|
||||
})
|
||||
}
|
||||
|
||||
// TestTrie_PutIntoBranchNode checks Put behavior when the root is a branch.
func TestTrie_PutIntoBranchNode(t *testing.T) {
	b := NewBranchNode()
	l := NewLeafNode([]byte{0x8})
	b.Children[0x7] = NewHashNode(l.Hash())
	b.Children[0x8] = NewHashNode(random.Uint256())
	tr := NewTrie(b, false, newTestStore())

	// empty key puts into the branch's own value (last) child
	require.NoError(t, tr.Put([]byte{}, []byte{0x12, 0x34}))
	tr.testHas(t, []byte{}, []byte{0x12, 0x34})

	// empty hash node child
	require.NoError(t, tr.Put([]byte{0x66}, []byte{0x56}))
	tr.testHas(t, []byte{0x66}, []byte{0x56})
	require.True(t, isValid(tr.root))

	// missing hash
	require.Error(t, tr.Put([]byte{0x70}, []byte{0x42}))
	require.True(t, isValid(tr.root))

	// hash is in store
	tr.putToStore(l)
	require.NoError(t, tr.Put([]byte{0x70}, []byte{0x42}))
	require.True(t, isValid(tr.root))
}
|
||||
|
||||
// TestTrie_PutIntoExtensionNode checks Put when the root extension points to a
// node available only by hash.
func TestTrie_PutIntoExtensionNode(t *testing.T) {
	l := NewLeafNode([]byte{0x11})
	key := []byte{0x12}
	e := NewExtensionNode(toNibbles(key), NewHashNode(l.Hash()))
	tr := NewTrie(e, false, newTestStore())

	// missing hash
	require.Error(t, tr.Put(key, []byte{0x42}))

	tr.putToStore(l)
	require.NoError(t, tr.Put(key, []byte{0x42}))
	tr.testHas(t, key, []byte{0x42})
	require.True(t, isValid(tr.root))
}
|
||||
|
||||
// TestTrie_PutIntoHashNode checks Put/Get across hash node children that must
// be resolved from the store.
func TestTrie_PutIntoHashNode(t *testing.T) {
	b := NewBranchNode()
	l := NewLeafNode(random.Bytes(5))
	e := NewExtensionNode([]byte{0x02}, l)
	b.Children[1] = NewHashNode(e.Hash())
	b.Children[9] = NewHashNode(random.Uint256())
	tr := NewTrie(b, false, newTestStore())

	tr.putToStore(e)

	t.Run("MissingLeafHash", func(t *testing.T) {
		_, err := tr.Get([]byte{0x12})
		require.Error(t, err)
	})

	tr.putToStore(l)

	val := random.Bytes(3)
	require.NoError(t, tr.Put([]byte{0x12, 0x34}, val))
	tr.testHas(t, []byte{0x12, 0x34}, val)
	tr.testHas(t, []byte{0x12}, l.value)
	require.True(t, isValid(tr.root))
}
|
||||
|
||||
// TestTrie_Put checks that incremental puts produce the same root hash as the
// manually constructed test trie.
func TestTrie_Put(t *testing.T) {
	trExp := newTestTrie(t)

	trAct := NewTrie(nil, false, newTestStore())
	require.NoError(t, trAct.Put([]byte{0xAC, 0x01}, []byte{0xAB, 0xCD}))
	require.NoError(t, trAct.Put([]byte{0xAC, 0x99}, []byte{0x22, 0x22}))
	require.NoError(t, trAct.Put([]byte{0xAC, 0xAE}, []byte("hello")))

	// Note: the exact tries differ because of ("acae":"hello") node is stored as Hash node in test trie.
	require.Equal(t, trExp.root.Hash(), trAct.root.Hash())
	require.True(t, isValid(trAct.root))
}
|
||||
|
||||
// TestTrie_PutInvalid checks key/value size limit enforcement.
func TestTrie_PutInvalid(t *testing.T) {
	tr := NewTrie(nil, false, newTestStore())
	key, value := []byte("key"), []byte("value")

	// big key
	require.Error(t, tr.Put(make([]byte, MaxKeyLength+1), value))

	// big value
	require.Error(t, tr.Put(key, make([]byte, MaxValueLength+1)))

	// this is ok though
	require.NoError(t, tr.Put(key, value))
	tr.testHas(t, key, value)
}
|
||||
|
||||
// TestTrie_BigPut populates the trie with overlapping string keys and checks
// lookups, rewrites and empty-value removal.
func TestTrie_BigPut(t *testing.T) {
	tr := NewTrie(nil, false, newTestStore())
	items := []struct{ k, v string }{
		{"item with long key", "value1"},
		{"item with matching prefix", "value2"},
		{"another prefix", "value3"},
		{"another prefix 2", "value4"},
		{"another ", "value5"},
	}

	for i := range items {
		require.NoError(t, tr.Put([]byte(items[i].k), []byte(items[i].v)))
	}

	for i := range items {
		tr.testHas(t, []byte(items[i].k), []byte(items[i].v))
	}

	t.Run("Rewrite", func(t *testing.T) {
		k, v := []byte(items[0].k), []byte{0x01, 0x23}
		require.NoError(t, tr.Put(k, v))
		tr.testHas(t, k, v)
	})

	t.Run("Remove", func(t *testing.T) {
		// Putting an empty value removes the key.
		k := []byte(items[1].k)
		require.NoError(t, tr.Put(k, []byte{}))
		tr.testHas(t, k, nil)
	})
}
|
||||
|
||||
// putToStore writes node n directly to the trie's backing store, registering
// an initial refcount of 1 when refcounting is enabled.
func (tr *Trie) putToStore(n Node) {
	if n.Type() == HashT {
		panic("can't put hash node in trie")
	}
	if tr.refcountEnabled {
		tr.refcount[n.Hash()] = &cachedNode{
			bytes:    n.Bytes(),
			refcount: 1,
		}
		tr.updateRefCount(n.Hash())
	} else {
		_ = tr.Store.Put(makeStorageKey(n.Hash().BytesBE()), n.Bytes())
	}
}
|
||||
|
||||
func (tr *Trie) testHas(t *testing.T, key, value []byte) {
|
||||
v, err := tr.Get(key)
|
||||
if value == nil {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, value, v)
|
||||
}
|
||||
|
||||
// isValid checks for 3 invariants:
// - BranchNode contains > 1 children
// - ExtensionNode do not contain another extension node
// - ExtensionNode do not have nil key
// It is used only during testing to catch possible bugs.
func isValid(curr Node) bool {
	switch n := curr.(type) {
	case *BranchNode:
		// A child counts as present unless it is an empty hash node.
		var count int
		for i := range n.Children {
			if !isValid(n.Children[i]) {
				return false
			}
			hn, ok := n.Children[i].(*HashNode)
			if !ok || !hn.IsEmpty() {
				count++
			}
		}
		return count > 1
	case *ExtensionNode:
		_, ok := n.next.(*ExtensionNode)
		return len(n.key) != 0 && !ok
	default:
		// Leaf and hash nodes impose no invariants of their own.
		return true
	}
}
|
||||
|
||||
// TestTrie_Get checks lookups through hash node children and through a
// hash-only root that must be unfolded from the store.
func TestTrie_Get(t *testing.T) {
	t.Run("HashNode", func(t *testing.T) {
		tr := newTestTrie(t)
		tr.testHas(t, []byte{0xAC, 0xAE}, []byte("hello"))
	})
	t.Run("UnfoldRoot", func(t *testing.T) {
		tr := newTestTrie(t)
		single := NewTrie(NewHashNode(tr.root.Hash()), false, tr.Store)
		single.testHas(t, []byte{0xAC}, nil)
		single.testHas(t, []byte{0xAC, 0x01}, []byte{0xAB, 0xCD})
		single.testHas(t, []byte{0xAC, 0x99}, []byte{0x22, 0x22})
		single.testHas(t, []byte{0xAC, 0xAE}, []byte("hello"))
	})
}
|
||||
|
||||
// TestTrie_Flush checks that a flushed trie can be reloaded from storage via
// its state root.
func TestTrie_Flush(t *testing.T) {
	pairs := map[string][]byte{
		"":     []byte("value0"),
		"key1": []byte("value1"),
		"key2": []byte("value2"),
	}

	tr := NewTrie(nil, false, newTestStore())
	for k, v := range pairs {
		require.NoError(t, tr.Put([]byte(k), v))
	}

	tr.Flush()
	// Re-open the trie from the same store, root known only by hash.
	tr = NewTrie(NewHashNode(tr.StateRoot()), false, tr.Store)
	for k, v := range pairs {
		actual, err := tr.Get([]byte(k))
		require.NoError(t, err)
		require.Equal(t, v, actual)
	}
}
|
||||
|
||||
func TestTrie_Delete(t *testing.T) {
|
||||
t.Run("No GC", func(t *testing.T) {
|
||||
testTrieDelete(t, false)
|
||||
})
|
||||
t.Run("With GC", func(t *testing.T) {
|
||||
testTrieDelete(t, true)
|
||||
})
|
||||
}
|
||||
|
||||
// testTrieDelete exercises Delete against every root node kind: hash, leaf,
// extension and branch (including branch collapse cases).
func testTrieDelete(t *testing.T, enableGC bool) {
	t.Run("Hash", func(t *testing.T) {
		t.Run("FromStore", func(t *testing.T) {
			l := NewLeafNode([]byte{0x12})
			tr := NewTrie(NewHashNode(l.Hash()), enableGC, newTestStore())
			t.Run("NotInStore", func(t *testing.T) {
				require.Error(t, tr.Delete([]byte{}))
			})

			tr.putToStore(l)
			tr.testHas(t, []byte{}, []byte{0x12})
			require.NoError(t, tr.Delete([]byte{}))
			tr.testHas(t, []byte{}, nil)
		})

		t.Run("Empty", func(t *testing.T) {
			tr := NewTrie(nil, enableGC, newTestStore())
			require.Error(t, tr.Delete([]byte{}))
		})
	})

	t.Run("Leaf", func(t *testing.T) {
		l := NewLeafNode([]byte{0x12, 0x34})
		tr := NewTrie(l, enableGC, newTestStore())
		t.Run("NonExistentKey", func(t *testing.T) {
			require.Error(t, tr.Delete([]byte{0x12}))
			tr.testHas(t, []byte{}, []byte{0x12, 0x34})
		})
		require.NoError(t, tr.Delete([]byte{}))
		tr.testHas(t, []byte{}, nil)
	})

	t.Run("Extension", func(t *testing.T) {
		t.Run("SingleKey", func(t *testing.T) {
			l := NewLeafNode([]byte{0x12, 0x34})
			e := NewExtensionNode([]byte{0x0A, 0x0B}, l)
			tr := NewTrie(e, enableGC, newTestStore())

			t.Run("NonExistentKey", func(t *testing.T) {
				require.Error(t, tr.Delete([]byte{}))
				tr.testHas(t, []byte{0xAB}, []byte{0x12, 0x34})
			})

			// Removing the only key leaves an empty hash node as the root.
			require.NoError(t, tr.Delete([]byte{0xAB}))
			require.True(t, tr.root.(*HashNode).IsEmpty())
		})

		t.Run("MultipleKeys", func(t *testing.T) {
			b := NewBranchNode()
			b.Children[0] = NewExtensionNode([]byte{0x01}, NewLeafNode([]byte{0x12, 0x34}))
			b.Children[6] = NewExtensionNode([]byte{0x07}, NewLeafNode([]byte{0x56, 0x78}))
			e := NewExtensionNode([]byte{0x01, 0x02}, b)
			tr := NewTrie(e, enableGC, newTestStore())

			h := e.Hash()
			require.NoError(t, tr.Delete([]byte{0x12, 0x01}))
			tr.testHas(t, []byte{0x12, 0x01}, nil)
			tr.testHas(t, []byte{0x12, 0x67}, []byte{0x56, 0x78})

			// The branch collapsed into e, which absorbed the child key.
			require.NotEqual(t, h, tr.root.Hash())
			require.Equal(t, toNibbles([]byte{0x12, 0x67}), e.key)
			require.IsType(t, (*LeafNode)(nil), e.next)
		})
	})

	t.Run("Branch", func(t *testing.T) {
		t.Run("3 Children", func(t *testing.T) {
			b := NewBranchNode()
			b.Children[lastChild] = NewLeafNode([]byte{0x12})
			b.Children[0] = NewExtensionNode([]byte{0x01}, NewLeafNode([]byte{0x34}))
			b.Children[1] = NewExtensionNode([]byte{0x06}, NewLeafNode([]byte{0x56}))
			tr := NewTrie(b, enableGC, newTestStore())
			require.NoError(t, tr.Delete([]byte{0x16}))
			tr.testHas(t, []byte{}, []byte{0x12})
			tr.testHas(t, []byte{0x01}, []byte{0x34})
			tr.testHas(t, []byte{0x16}, nil)
		})
		t.Run("2 Children", func(t *testing.T) {
			// newt builds a branch with a value child and one hash-only child
			// whose real nodes live in the store.
			newt := func(t *testing.T) *Trie {
				b := NewBranchNode()
				b.Children[lastChild] = NewLeafNode([]byte{0x12})
				l := NewLeafNode([]byte{0x34})
				e := NewExtensionNode([]byte{0x06}, l)
				b.Children[5] = NewHashNode(e.Hash())
				tr := NewTrie(b, enableGC, newTestStore())
				tr.putToStore(l)
				tr.putToStore(e)
				return tr
			}

			t.Run("DeleteLast", func(t *testing.T) {
				t.Run("MergeExtension", func(t *testing.T) {
					tr := newt(t)
					require.NoError(t, tr.Delete([]byte{}))
					tr.testHas(t, []byte{}, nil)
					tr.testHas(t, []byte{0x56}, []byte{0x34})
					require.IsType(t, (*ExtensionNode)(nil), tr.root)
				})

				t.Run("LeaveLeaf", func(t *testing.T) {
					c := NewBranchNode()
					c.Children[5] = NewLeafNode([]byte{0x05})
					c.Children[6] = NewLeafNode([]byte{0x06})

					b := NewBranchNode()
					b.Children[lastChild] = NewLeafNode([]byte{0x12})
					b.Children[5] = c
					tr := NewTrie(b, enableGC, newTestStore())

					require.NoError(t, tr.Delete([]byte{}))
					tr.testHas(t, []byte{}, nil)
					tr.testHas(t, []byte{0x55}, []byte{0x05})
					tr.testHas(t, []byte{0x56}, []byte{0x06})
					require.IsType(t, (*ExtensionNode)(nil), tr.root)
				})
			})

			t.Run("DeleteMiddle", func(t *testing.T) {
				tr := newt(t)
				require.NoError(t, tr.Delete([]byte{0x56}))
				tr.testHas(t, []byte{}, []byte{0x12})
				tr.testHas(t, []byte{0x56}, nil)
				require.IsType(t, (*LeafNode)(nil), tr.root)
			})
		})
	})
}
|
||||
|
||||
func TestTrie_PanicInvalidRoot(t *testing.T) {
|
||||
tr := &Trie{Store: newTestStore()}
|
||||
require.Panics(t, func() { _ = tr.Put([]byte{1}, []byte{2}) })
|
||||
require.Panics(t, func() { _, _ = tr.Get([]byte{1}) })
|
||||
require.Panics(t, func() { _ = tr.Delete([]byte{1}) })
|
||||
}
|
||||
|
||||
// TestTrie_Collapse checks hash-compression of the trie at various depths and
// for each root node kind.
func TestTrie_Collapse(t *testing.T) {
	t.Run("PanicNegative", func(t *testing.T) {
		tr := newTestTrie(t)
		require.Panics(t, func() { tr.Collapse(-1) })
	})
	t.Run("Depth=0", func(t *testing.T) {
		tr := newTestTrie(t)
		h := tr.root.Hash()

		_, ok := tr.root.(*HashNode)
		require.False(t, ok)

		// Depth 0 replaces the root itself, hash preserved.
		tr.Collapse(0)
		_, ok = tr.root.(*HashNode)
		require.True(t, ok)
		require.Equal(t, h, tr.root.Hash())
	})
	t.Run("Branch,Depth=1", func(t *testing.T) {
		b := NewBranchNode()
		e := NewExtensionNode([]byte{0x01}, NewLeafNode([]byte("value1")))
		he := e.Hash()
		b.Children[0] = e
		hb := b.Hash()

		tr := NewTrie(b, false, newTestStore())
		tr.Collapse(1)

		// The branch survives; its children become hash nodes.
		newb, ok := tr.root.(*BranchNode)
		require.True(t, ok)
		require.Equal(t, hb, newb.Hash())
		require.IsType(t, (*HashNode)(nil), b.Children[0])
		require.Equal(t, he, b.Children[0].Hash())
	})
	t.Run("Extension,Depth=1", func(t *testing.T) {
		l := NewLeafNode([]byte("value"))
		hl := l.Hash()
		e := NewExtensionNode([]byte{0x01}, l)
		h := e.Hash()
		tr := NewTrie(e, false, newTestStore())
		tr.Collapse(1)

		newe, ok := tr.root.(*ExtensionNode)
		require.True(t, ok)
		require.Equal(t, h, newe.Hash())
		require.IsType(t, (*HashNode)(nil), newe.next)
		require.Equal(t, hl, newe.next.Hash())
	})
	t.Run("Leaf", func(t *testing.T) {
		// Leaves have nothing to collapse at positive depth.
		l := NewLeafNode([]byte("value"))
		tr := NewTrie(l, false, newTestStore())
		tr.Collapse(10)
		require.Equal(t, NewLeafNode([]byte("value")), tr.root)
	})
	t.Run("Hash", func(t *testing.T) {
		t.Run("Empty", func(t *testing.T) {
			tr := NewTrie(new(HashNode), false, newTestStore())
			require.NotPanics(t, func() { tr.Collapse(1) })
			hn, ok := tr.root.(*HashNode)
			require.True(t, ok)
			require.True(t, hn.IsEmpty())
		})

		// Hash nodes are left untouched.
		h := random.Uint256()
		hn := NewHashNode(h)
		tr := NewTrie(hn, false, newTestStore())
		tr.Collapse(10)

		newRoot, ok := tr.root.(*HashNode)
		require.True(t, ok)
		require.Equal(t, NewHashNode(h), newRoot)
	})
}
|
|
@ -30,6 +30,14 @@ var (
|
|||
Namespace: "neogo",
|
||||
},
|
||||
)
|
||||
//stateHeight prometheus metric.
|
||||
stateHeight = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Help: "Current verified state height",
|
||||
Name: "current_state_height",
|
||||
Namespace: "neogo",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -51,3 +59,7 @@ func updateHeaderHeightMetric(hHeight int) {
|
|||
// updateBlockHeightMetric updates the blockHeight Prometheus gauge.
func updateBlockHeightMetric(bHeight uint32) {
	blockHeight.Set(float64(bHeight))
}
|
||||
|
||||
// updateStateHeightMetric updates the stateHeight ("current verified state
// height") Prometheus gauge.
func updateStateHeightMetric(sHeight uint32) {
	stateHeight.Set(float64(sHeight))
}
|
||||
|
|
146
pkg/core/state/mpt_root.go
Normal file
146
pkg/core/state/mpt_root.go
Normal file
|
@ -0,0 +1,146 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// MPTRootBase represents storage state root.
type MPTRootBase struct {
	Version byte   `json:"version"`
	Index   uint32 `json:"index"`
	// NOTE(review): the tag reads "prehash" (not "prevhash") — presumably for
	// cross-node JSON compatibility; confirm before changing.
	PrevHash util.Uint256 `json:"prehash"`
	Root     util.Uint256 `json:"stateroot"`
}
|
||||
|
||||
// MPTRoot represents storage state root together with sign info.
type MPTRoot struct {
	MPTRootBase
	// Witness is nil for a root that has no signature attached.
	Witness *transaction.Witness `json:"witness,omitempty"`
}
|
||||
|
||||
// MPTRootStateFlag represents verification state of the state root.
type MPTRootStateFlag byte

// Possible verification states of MPTRoot.
const (
	Unverified MPTRootStateFlag = 0x00
	Verified   MPTRootStateFlag = 0x01
	Invalid    MPTRootStateFlag = 0x03
)
|
||||
|
||||
// MPTRootState represents state root together with its verification state.
type MPTRootState struct {
	MPTRoot `json:"stateroot"`
	Flag    MPTRootStateFlag `json:"flag"`
}
|
||||
|
||||
// EncodeBinary implements io.Serializable.
// Layout: one flag byte followed by the serialized MPTRoot.
func (s *MPTRootState) EncodeBinary(w *io.BinWriter) {
	w.WriteB(byte(s.Flag))
	s.MPTRoot.EncodeBinary(w)
}
|
||||
|
||||
// DecodeBinary implements io.Serializable.
// Mirrors EncodeBinary: flag byte first, then the MPTRoot.
func (s *MPTRootState) DecodeBinary(r *io.BinReader) {
	s.Flag = MPTRootStateFlag(r.ReadB())
	s.MPTRoot.DecodeBinary(r)
}
|
||||
|
||||
// GetSignedPart returns part of MPTRootBase which needs to be signed.
func (s *MPTRootBase) GetSignedPart() []byte {
	buf := io.NewBufBinWriter()
	s.EncodeBinary(buf.BinWriter)
	// NOTE(review): a possible writer error is not checked here — confirm
	// what Bytes() yields on error and that callers can cope with it.
	return buf.Bytes()
}
|
||||
|
||||
// Equals checks if s == other.
|
||||
func (s *MPTRootBase) Equals(other *MPTRootBase) bool {
|
||||
return s.Version == other.Version && s.Index == other.Index &&
|
||||
s.PrevHash.Equals(other.PrevHash) && s.Root.Equals(other.Root)
|
||||
}
|
||||
|
||||
// Hash returns hash of s.
|
||||
func (s *MPTRootBase) Hash() util.Uint256 {
|
||||
return hash.DoubleSha256(s.GetSignedPart())
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable.
|
||||
func (s *MPTRootBase) DecodeBinary(r *io.BinReader) {
|
||||
s.Version = r.ReadB()
|
||||
s.Index = r.ReadU32LE()
|
||||
s.PrevHash.DecodeBinary(r)
|
||||
s.Root.DecodeBinary(r)
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable.
|
||||
func (s *MPTRootBase) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteB(s.Version)
|
||||
w.WriteU32LE(s.Index)
|
||||
s.PrevHash.EncodeBinary(w)
|
||||
s.Root.EncodeBinary(w)
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable.
|
||||
func (s *MPTRoot) DecodeBinary(r *io.BinReader) {
|
||||
s.MPTRootBase.DecodeBinary(r)
|
||||
|
||||
var ws []transaction.Witness
|
||||
r.ReadArray(&ws, 1)
|
||||
if len(ws) == 1 {
|
||||
s.Witness = &ws[0]
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable.
|
||||
func (s *MPTRoot) EncodeBinary(w *io.BinWriter) {
|
||||
s.MPTRootBase.EncodeBinary(w)
|
||||
if s.Witness == nil {
|
||||
w.WriteVarUint(0)
|
||||
} else {
|
||||
w.WriteArray([]*transaction.Witness{s.Witness})
|
||||
}
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer.
|
||||
func (f MPTRootStateFlag) String() string {
|
||||
switch f {
|
||||
case Unverified:
|
||||
return "Unverified"
|
||||
case Verified:
|
||||
return "Verified"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (f MPTRootStateFlag) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + f.String() + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (f *MPTRootStateFlag) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
switch s {
|
||||
case "Unverified":
|
||||
*f = Unverified
|
||||
case "Verified":
|
||||
*f = Verified
|
||||
case "Invalid":
|
||||
*f = Invalid
|
||||
default:
|
||||
return errors.New("unknown flag")
|
||||
}
|
||||
return nil
|
||||
}
|
100
pkg/core/state/mpt_root_test.go
Normal file
100
pkg/core/state/mpt_root_test.go
Normal file
|
@ -0,0 +1,100 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/random"
|
||||
"github.com/nspcc-dev/neo-go/pkg/internal/testserdes"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testStateRoot() *MPTRoot {
|
||||
return &MPTRoot{
|
||||
MPTRootBase: MPTRootBase{
|
||||
Version: byte(rand.Uint32()),
|
||||
Index: rand.Uint32(),
|
||||
PrevHash: random.Uint256(),
|
||||
Root: random.Uint256(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateRoot_Serializable(t *testing.T) {
|
||||
r := testStateRoot()
|
||||
testserdes.EncodeDecodeBinary(t, r, new(MPTRoot))
|
||||
|
||||
t.Run("WithWitness", func(t *testing.T) {
|
||||
r.Witness = &transaction.Witness{
|
||||
InvocationScript: random.Bytes(10),
|
||||
VerificationScript: random.Bytes(11),
|
||||
}
|
||||
testserdes.EncodeDecodeBinary(t, r, new(MPTRoot))
|
||||
})
|
||||
}
|
||||
|
||||
func TestStateRootEquals(t *testing.T) {
|
||||
r1 := testStateRoot()
|
||||
r2 := *r1
|
||||
require.True(t, r1.Equals(&r2.MPTRootBase))
|
||||
|
||||
r2.MPTRootBase.Index++
|
||||
require.False(t, r1.Equals(&r2.MPTRootBase))
|
||||
}
|
||||
|
||||
func TestMPTRootState_Serializable(t *testing.T) {
|
||||
rs := &MPTRootState{
|
||||
MPTRoot: *testStateRoot(),
|
||||
Flag: 0x04,
|
||||
}
|
||||
rs.MPTRoot.Witness = &transaction.Witness{
|
||||
InvocationScript: random.Bytes(10),
|
||||
VerificationScript: random.Bytes(11),
|
||||
}
|
||||
testserdes.EncodeDecodeBinary(t, rs, new(MPTRootState))
|
||||
}
|
||||
|
||||
func TestMPTRootStateUnverifiedByDefault(t *testing.T) {
|
||||
var r MPTRootState
|
||||
require.Equal(t, Unverified, r.Flag)
|
||||
}
|
||||
|
||||
func TestMPTRoot_MarshalJSON(t *testing.T) {
|
||||
t.Run("Good", func(t *testing.T) {
|
||||
r := testStateRoot()
|
||||
rs := &MPTRootState{
|
||||
MPTRoot: *r,
|
||||
Flag: Verified,
|
||||
}
|
||||
testserdes.MarshalUnmarshalJSON(t, rs, new(MPTRootState))
|
||||
})
|
||||
|
||||
t.Run("Compatibility", func(t *testing.T) {
|
||||
js := []byte(`{
|
||||
"flag": "Unverified",
|
||||
"stateroot": {
|
||||
"version": 1,
|
||||
"index": 3000000,
|
||||
"prehash": "0x4f30f43af8dd2262fc331c45bfcd9066ebbacda204e6e81371cbd884fe7d6c90",
|
||||
"stateroot": "0xb2fd7e368a848ef70d27cf44940a35237333ed05f1d971c9408f0eb285e0b6f3"
|
||||
}}`)
|
||||
|
||||
rs := new(MPTRootState)
|
||||
require.NoError(t, json.Unmarshal(js, &rs))
|
||||
|
||||
require.EqualValues(t, 1, rs.Version)
|
||||
require.EqualValues(t, 3000000, rs.Index)
|
||||
require.Nil(t, rs.Witness)
|
||||
|
||||
u, err := util.Uint256DecodeStringLE("4f30f43af8dd2262fc331c45bfcd9066ebbacda204e6e81371cbd884fe7d6c90")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, u, rs.PrevHash)
|
||||
|
||||
u, err = util.Uint256DecodeStringLE("b2fd7e368a848ef70d27cf44940a35237333ed05f1d971c9408f0eb285e0b6f3")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, u, rs.Root)
|
||||
})
|
||||
}
|
|
@ -1,26 +1,31 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
)
|
||||
|
||||
// NEP5Tracker contains info about a single account in a NEP5 contract.
|
||||
type NEP5Tracker struct {
|
||||
// Balance is the current balance of the account.
|
||||
Balance int64
|
||||
Balance *big.Int
|
||||
// LastUpdatedBlock is a number of block when last `transfer` to or from the
|
||||
// account occured.
|
||||
LastUpdatedBlock uint32
|
||||
}
|
||||
|
||||
// NEP5TransferLog is a log of NEP5 token transfers for the specific command.
|
||||
type NEP5TransferLog struct {
|
||||
// TransferLog is a log of NEP5 token transfers for the specific command.
|
||||
type TransferLog struct {
|
||||
Raw []byte
|
||||
}
|
||||
|
||||
// NEP5TransferSize is a size of a marshaled NEP5Transfer struct in bytes.
|
||||
const NEP5TransferSize = util.Uint160Size*3 + 8 + 4 + 4 + util.Uint256Size
|
||||
const NEP5TransferSize = util.Uint160Size*3 + amountSize + 4 + 4 + util.Uint256Size + 4
|
||||
|
||||
// NEP5Transfer represents a single NEP5 Transfer event.
|
||||
type NEP5Transfer struct {
|
||||
|
@ -32,15 +37,19 @@ type NEP5Transfer struct {
|
|||
To util.Uint160
|
||||
// Amount is the amount of tokens transferred.
|
||||
// It is negative when tokens are sent and positive if they are received.
|
||||
Amount int64
|
||||
Amount *big.Int
|
||||
// Block is a number of block when the event occured.
|
||||
Block uint32
|
||||
// Timestamp is the timestamp of the block where transfer occured.
|
||||
Timestamp uint32
|
||||
// Tx is a hash the transaction.
|
||||
Tx util.Uint256
|
||||
// Index is the index of this transfer in the corresponding tx.
|
||||
Index uint32
|
||||
}
|
||||
|
||||
const amountSize = 32
|
||||
|
||||
// NEP5Balances is a map of the NEP5 contract hashes
|
||||
// to the corresponding structures.
|
||||
type NEP5Balances struct {
|
||||
|
@ -49,6 +58,11 @@ type NEP5Balances struct {
|
|||
NextTransferBatch uint32
|
||||
}
|
||||
|
||||
// NEP5Metadata is a metadata for NEP5 contracts.
|
||||
type NEP5Metadata struct {
|
||||
Decimals int64
|
||||
}
|
||||
|
||||
// NewNEP5Balances returns new NEP5Balances.
|
||||
func NewNEP5Balances() *NEP5Balances {
|
||||
return &NEP5Balances{
|
||||
|
@ -81,8 +95,18 @@ func (bs *NEP5Balances) EncodeBinary(w *io.BinWriter) {
|
|||
}
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
func (bs *NEP5Metadata) DecodeBinary(r *io.BinReader) {
|
||||
bs.Decimals = int64(r.ReadU64LE())
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
func (bs *NEP5Metadata) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteU64LE(uint64(bs.Decimals))
|
||||
}
|
||||
|
||||
// Append appends single transfer to a log.
|
||||
func (lg *NEP5TransferLog) Append(tr *NEP5Transfer) error {
|
||||
func (lg *TransferLog) Append(tr io.Serializable) error {
|
||||
w := io.NewBufBinWriter()
|
||||
tr.EncodeBinary(w.BinWriter)
|
||||
if w.Err != nil {
|
||||
|
@ -93,40 +117,97 @@ func (lg *NEP5TransferLog) Append(tr *NEP5Transfer) error {
|
|||
}
|
||||
|
||||
// ForEach iterates over transfer log returning on first error.
|
||||
func (lg *NEP5TransferLog) ForEach(f func(*NEP5Transfer) error) error {
|
||||
func (lg *TransferLog) ForEach(size int, tr io.Serializable, f func() (bool, error)) (bool, error) {
|
||||
if lg == nil {
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
tr := new(NEP5Transfer)
|
||||
for i := 0; i < len(lg.Raw); i += NEP5TransferSize {
|
||||
r := io.NewBinReaderFromBuf(lg.Raw[i : i+NEP5TransferSize])
|
||||
for i := len(lg.Raw); i > 0; i -= size {
|
||||
r := io.NewBinReaderFromBuf(lg.Raw[i-size : i])
|
||||
tr.DecodeBinary(r)
|
||||
if r.Err != nil {
|
||||
return r.Err
|
||||
} else if err := f(tr); err != nil {
|
||||
return nil
|
||||
return false, r.Err
|
||||
}
|
||||
cont, err := f()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !cont {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Size returns an amount of transfer written in log.
|
||||
func (lg *NEP5TransferLog) Size() int {
|
||||
return len(lg.Raw) / NEP5TransferSize
|
||||
// Size returns an amount of transfer written in log provided size of a single transfer.
|
||||
func (lg *TransferLog) Size() int {
|
||||
return len(lg.Raw)
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
func (t *NEP5Tracker) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteU64LE(uint64(t.Balance))
|
||||
w.WriteVarBytes(emit.IntToBytes(t.Balance))
|
||||
w.WriteU32LE(t.LastUpdatedBlock)
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
func (t *NEP5Tracker) DecodeBinary(r *io.BinReader) {
|
||||
t.Balance = int64(r.ReadU64LE())
|
||||
t.Balance = emit.BytesToInt(r.ReadVarBytes(amountSize))
|
||||
t.LastUpdatedBlock = r.ReadU32LE()
|
||||
}
|
||||
|
||||
func parseUint160(addr []byte) util.Uint160 {
|
||||
if u, err := util.Uint160DecodeBytesBE(addr); err == nil {
|
||||
return u
|
||||
}
|
||||
return util.Uint160{}
|
||||
}
|
||||
|
||||
// NEP5TransferFromNotification creates NEP5Transfer structure from the given
|
||||
// notification (and using given context) if it's possible to parse it as
|
||||
// NEP5 transfer.
|
||||
func NEP5TransferFromNotification(ne NotificationEvent, txHash util.Uint256, height uint32, time uint32, index uint32) (*NEP5Transfer, error) {
|
||||
arr, ok := ne.Item.Value().([]vm.StackItem)
|
||||
if !ok || len(arr) != 4 {
|
||||
return nil, errors.New("no array or wrong element count")
|
||||
}
|
||||
op, ok := arr[0].Value().([]byte)
|
||||
if !ok || string(op) != "transfer" {
|
||||
return nil, errors.New("not a 'transfer' event")
|
||||
}
|
||||
from, ok := arr[1].Value().([]byte)
|
||||
if !ok {
|
||||
return nil, errors.New("wrong 'from' type")
|
||||
}
|
||||
to, ok := arr[2].Value().([]byte)
|
||||
if !ok {
|
||||
return nil, errors.New("wrong 'to' type")
|
||||
}
|
||||
amount, ok := arr[3].Value().(*big.Int)
|
||||
if !ok {
|
||||
bs, ok := arr[3].Value().([]byte)
|
||||
if !ok {
|
||||
return nil, errors.New("wrong amount type")
|
||||
}
|
||||
if len(bs) > amountSize {
|
||||
return nil, errors.New("integer overflow")
|
||||
}
|
||||
amount = emit.BytesToInt(bs)
|
||||
}
|
||||
toAddr := parseUint160(to)
|
||||
fromAddr := parseUint160(from)
|
||||
transfer := &NEP5Transfer{
|
||||
Asset: ne.ScriptHash,
|
||||
From: fromAddr,
|
||||
To: toAddr,
|
||||
Amount: amount,
|
||||
Block: height,
|
||||
Timestamp: time,
|
||||
Tx: txHash,
|
||||
Index: index,
|
||||
}
|
||||
return transfer, nil
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
// Note: change NEP5TransferSize constant when changing this function.
|
||||
func (t *NEP5Transfer) EncodeBinary(w *io.BinWriter) {
|
||||
|
@ -136,7 +217,20 @@ func (t *NEP5Transfer) EncodeBinary(w *io.BinWriter) {
|
|||
w.WriteBytes(t.To[:])
|
||||
w.WriteU32LE(t.Block)
|
||||
w.WriteU32LE(t.Timestamp)
|
||||
w.WriteU64LE(uint64(t.Amount))
|
||||
am := emit.IntToBytes(t.Amount)
|
||||
if len(am) > amountSize {
|
||||
panic("bad integer length")
|
||||
}
|
||||
fillerLen := amountSize - len(am)
|
||||
w.WriteBytes(am)
|
||||
var filler byte
|
||||
if t.Amount.Sign() < 0 {
|
||||
filler = 0xff
|
||||
}
|
||||
for i := 0; i < fillerLen; i++ {
|
||||
w.WriteB(filler)
|
||||
}
|
||||
w.WriteU32LE(t.Index)
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
|
@ -147,5 +241,8 @@ func (t *NEP5Transfer) DecodeBinary(r *io.BinReader) {
|
|||
r.ReadBytes(t.To[:])
|
||||
t.Block = r.ReadU32LE()
|
||||
t.Timestamp = r.ReadU32LE()
|
||||
t.Amount = int64(r.ReadU64LE())
|
||||
amount := make([]byte, amountSize)
|
||||
r.ReadBytes(amount)
|
||||
t.Amount = emit.BytesToInt(amount)
|
||||
t.Index = r.ReadU32LE()
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -21,26 +22,27 @@ func TestNEP5TransferLog_Append(t *testing.T) {
|
|||
randomTransfer(r),
|
||||
}
|
||||
|
||||
lg := new(NEP5TransferLog)
|
||||
lg := new(TransferLog)
|
||||
for _, tr := range expected {
|
||||
require.NoError(t, lg.Append(tr))
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), lg.Size())
|
||||
require.Equal(t, len(expected), lg.Size()/NEP5TransferSize)
|
||||
|
||||
i := 0
|
||||
err := lg.ForEach(func(tr *NEP5Transfer) error {
|
||||
i := len(expected) - 1
|
||||
tr := new(NEP5Transfer)
|
||||
cont, err := lg.ForEach(NEP5TransferSize, tr, func() (bool, error) {
|
||||
require.Equal(t, expected[i], tr)
|
||||
i++
|
||||
return nil
|
||||
i--
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, cont)
|
||||
}
|
||||
|
||||
func TestNEP5Tracker_EncodeBinary(t *testing.T) {
|
||||
expected := &NEP5Tracker{
|
||||
Balance: int64(rand.Uint64()),
|
||||
Balance: big.NewInt(int64(rand.Uint64())),
|
||||
LastUpdatedBlock: rand.Uint32(),
|
||||
}
|
||||
|
||||
|
@ -52,7 +54,7 @@ func TestNEP5Transfer_DecodeBinary(t *testing.T) {
|
|||
Asset: util.Uint160{1, 2, 3},
|
||||
From: util.Uint160{5, 6, 7},
|
||||
To: util.Uint160{8, 9, 10},
|
||||
Amount: 42,
|
||||
Amount: big.NewInt(42),
|
||||
Block: 12345,
|
||||
Timestamp: 54321,
|
||||
Tx: util.Uint256{8, 5, 3},
|
||||
|
@ -69,7 +71,7 @@ func TestNEP5TransferSize(t *testing.T) {
|
|||
|
||||
func randomTransfer(r *rand.Rand) *NEP5Transfer {
|
||||
return &NEP5Transfer{
|
||||
Amount: int64(r.Uint64()),
|
||||
Amount: big.NewInt(int64(r.Uint64())),
|
||||
Block: r.Uint32(),
|
||||
Asset: random.Uint160(),
|
||||
From: random.Uint160(),
|
||||
|
@ -77,3 +79,7 @@ func randomTransfer(r *rand.Rand) *NEP5Transfer {
|
|||
Tx: random.Uint256(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransfer_Size(t *testing.T) {
|
||||
require.Equal(t, TransferSize, io.GetVarSize(new(Transfer)))
|
||||
}
|
||||
|
|
47
pkg/core/state/transfer_log.go
Normal file
47
pkg/core/state/transfer_log.go
Normal file
|
@ -0,0 +1,47 @@
|
|||
package state
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
)
|
||||
|
||||
// TransferSize is a size of a marshaled Transfer struct in bytes.
|
||||
const TransferSize = 2 + 8 + 4 + 4 + util.Uint256Size
|
||||
|
||||
// Transfer represents a single Transfer event.
|
||||
type Transfer struct {
|
||||
// IsGoverning is true iff transfer is for neo token.
|
||||
IsGoverning bool
|
||||
// IsSent is true iff UTXO used in the input.
|
||||
IsSent bool
|
||||
// Amount is the amount of tokens transferred.
|
||||
// It is negative when tokens are sent and positive if they are received.
|
||||
Amount int64
|
||||
// Block is a number of block when the event occured.
|
||||
Block uint32
|
||||
// Timestamp is the timestamp of the block where transfer occured.
|
||||
Timestamp uint32
|
||||
// Tx is a hash the transaction.
|
||||
Tx util.Uint256
|
||||
}
|
||||
|
||||
// EncodeBinary implements io.Serializable interface.
|
||||
// Note: change TransferSize constant when changing this function.
|
||||
func (t *Transfer) EncodeBinary(w *io.BinWriter) {
|
||||
w.WriteBytes(t.Tx[:])
|
||||
w.WriteU32LE(t.Block)
|
||||
w.WriteU32LE(t.Timestamp)
|
||||
w.WriteU64LE(uint64(t.Amount))
|
||||
w.WriteBool(t.IsGoverning)
|
||||
w.WriteBool(t.IsSent)
|
||||
}
|
||||
|
||||
// DecodeBinary implements io.Serializable interface.
|
||||
func (t *Transfer) DecodeBinary(r *io.BinReader) {
|
||||
r.ReadBytes(t.Tx[:])
|
||||
t.Block = r.ReadU32LE()
|
||||
t.Timestamp = r.ReadU32LE()
|
||||
t.Amount = int64(r.ReadU64LE())
|
||||
t.IsGoverning = r.ReadBool()
|
||||
t.IsSent = r.ReadBool()
|
||||
}
|
|
@ -43,6 +43,11 @@ func (s *UnspentCoin) EncodeBinary(bw *io.BinWriter) {
|
|||
func (s *UnspentCoin) DecodeBinary(br *io.BinReader) {
|
||||
s.Height = br.ReadU32LE()
|
||||
br.ReadArray(&s.States)
|
||||
if br.Err == nil {
|
||||
for i := range s.States {
|
||||
s.States[i].Output.Position = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeBinary implements Serializable interface.
|
||||
|
|
|
@ -27,6 +27,7 @@ func TestDecodeEncodeUnspentCoin(t *testing.T) {
|
|||
AssetID: random.Uint256(),
|
||||
Amount: util.Fixed8(420),
|
||||
ScriptHash: random.Uint160(),
|
||||
Position: 1,
|
||||
},
|
||||
SpendHeight: 0,
|
||||
State: CoinConfirmed,
|
||||
|
@ -36,6 +37,7 @@ func TestDecodeEncodeUnspentCoin(t *testing.T) {
|
|||
AssetID: random.Uint256(),
|
||||
Amount: util.Fixed8(4200),
|
||||
ScriptHash: random.Uint160(),
|
||||
Position: 2,
|
||||
},
|
||||
SpendHeight: 111000,
|
||||
State: CoinSpent & CoinClaimed,
|
||||
|
|
|
@ -61,6 +61,12 @@ func (s *BoltDBStore) Get(key []byte) (val []byte, err error) {
|
|||
err = s.db.View(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(Bucket)
|
||||
val = b.Get(key)
|
||||
// Value from Get is only valid for the lifetime of transaction, #1482
|
||||
if val != nil {
|
||||
var valcopy = make([]byte, len(val))
|
||||
copy(valcopy, val)
|
||||
val = valcopy
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if val == nil {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue