rpc: add configuration extension for MPT-backed iterator sessions

Add the ability to switch between the current blockchain storage and
MPT-backed storage for iterator traversal. This may be useful because the
iterator implementation traverses the underlying backing storage (BoltDB,
LevelDB) inside the DB's Seek, which is a blocking operation for BoltDB:
```
Opening a read transaction and a write transaction in the same goroutine
can cause the writer to deadlock because the database periodically needs
to re-mmap itself as it grows and it cannot do that while a read transaction
is open.

If a long running read transaction (for example, a snapshot transaction)
is needed, you might want to set DB.InitialMmapSize to a large enough
value to avoid potential blocking of write transaction.
```
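
This failure mode can be sketched outside of the node. Below is a minimal,
illustrative reproduction, assuming the go.etcd.io/bbolt package; the file
name, value size and timings are made up:
```
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("test.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A long-lived read transaction, analogous to an iterator session that
	// stays open until traversal finishes or SessionExpirationTime fires.
	rtx, err := db.Begin(false)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		time.Sleep(3 * time.Minute) // session lifetime
		_ = rtx.Rollback()          // iterator resources released
	}()

	// A write transaction, analogous to blockchain persist. If the DB has
	// grown enough to need re-mmapping, this update blocks until the read
	// transaction above is rolled back (this is exactly the case the bbolt
	// documentation quoted above warns about).
	start := time.Now()
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("chain"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), make([]byte, 1<<20)) // force growth
	})
	log.Printf("persist took %s, err: %v", time.Since(start), err)
}
```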

So during bbolt re-mmapping, standard blockchain DB operations (i.e. persist)
can be blocked until the iterator resources are released. The described
behaviour was tested and confirmed on a four-node privnet with BoltDB and
`SessionExpirationTime` set to 180 seconds. After a new iterator session was
added to the server, the subsequent persist took ~5m21s, see the log record at
`2022-06-17T18:58:21.561+0300`:

```
anna@kiwi:~/Documents/GitProjects/nspcc-dev/neo-go$ ./bin/neo-go node -p
2022-06-17T18:52:21.535+0300	INFO	initial gas supply is not set or wrong, setting default value	{"InitialGASSupply": "52000000"}
2022-06-17T18:52:21.535+0300	INFO	MaxBlockSize is not set or wrong, setting default value	{"MaxBlockSize": 262144}
2022-06-17T18:52:21.535+0300	INFO	MaxBlockSystemFee is not set or wrong, setting default value	{"MaxBlockSystemFee": 900000000000}
2022-06-17T18:52:21.535+0300	INFO	MaxTransactionsPerBlock is not set or wrong, using default value	{"MaxTransactionsPerBlock": 512}
2022-06-17T18:52:21.535+0300	INFO	MaxValidUntilBlockIncrement is not set or wrong, using default value	{"MaxValidUntilBlockIncrement": 5760}
2022-06-17T18:52:21.535+0300	INFO	Hardforks are not set, using default value
2022-06-17T18:52:21.543+0300	INFO	no storage version found! creating genesis block
2022-06-17T18:52:21.546+0300	INFO	ExtensiblePoolSize is not set or wrong, using default value	{"ExtensiblePoolSize": 20}
2022-06-17T18:52:21.546+0300	INFO	service is running	{"service": "Prometheus", "endpoint": ":2112"}
2022-06-17T18:52:21.547+0300	INFO	starting rpc-server	{"endpoint": ":20331"}
2022-06-17T18:52:21.547+0300	INFO	rpc-server iterator sessions are enabled
2022-06-17T18:52:21.547+0300	INFO	service hasn't started since it's disabled	{"service": "Pprof"}
2022-06-17T18:52:21.547+0300	INFO	node started	{"blockHeight": 0, "headerHeight": 0}

    _   ____________        __________
   / | / / ____/ __ \      / ____/ __ \
  /  |/ / __/ / / / /_____/ / __/ / / /
 / /|  / /___/ /_/ /_____/ /_/ / /_/ /
/_/ |_/_____/\____/      \____/\____/

/NEO-GO:0.99.1-pre-53-g7ccb646e/

2022-06-17T18:52:21.548+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 1}
2022-06-17T18:52:21.550+0300	INFO	started protocol	{"addr": "127.0.0.1:20336", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 65, "id": 1475228436}
2022-06-17T18:52:22.575+0300	INFO	persisted to disk	{"blocks": 65, "keys": 1410, "headerHeight": 65, "blockHeight": 65, "took": "28.193409ms"}
2022-06-17T18:52:24.548+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 2}
2022-06-17T18:52:24.548+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 3}
2022-06-17T18:52:24.548+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 4}
2022-06-17T18:52:24.549+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 5}
2022-06-17T18:52:24.549+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 6}
2022-06-17T18:52:24.549+0300	INFO	started protocol	{"addr": "127.0.0.1:20333", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 65, "id": 3444438498}
2022-06-17T18:52:24.549+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 7}
2022-06-17T18:52:24.549+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 8}
2022-06-17T18:52:24.550+0300	INFO	node reached synchronized state, starting services
2022-06-17T18:52:24.550+0300	INFO	started protocol	{"addr": "127.0.0.1:20334", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 65, "id": 2435677826}
2022-06-17T18:52:24.550+0300	INFO	starting state validation service
2022-06-17T18:52:24.550+0300	INFO	RPC server already started
2022-06-17T18:52:24.550+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 9}
2022-06-17T18:52:24.550+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 10}
2022-06-17T18:52:24.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 9}
2022-06-17T18:52:24.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 8}
2022-06-17T18:52:24.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 7}
2022-06-17T18:52:24.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "unexpected empty payload: CMDVersion", "peerCount": 6}
2022-06-17T18:52:24.550+0300	INFO	started protocol	{"addr": "127.0.0.1:20335", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 65, "id": 970555896}
2022-06-17T18:52:24.551+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 7}
2022-06-17T18:52:24.551+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "unexpected empty payload: CMDVersion", "peerCount": 6}
2022-06-17T18:52:24.551+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "unexpected empty payload: CMDVersion", "peerCount": 5}
2022-06-17T18:52:24.551+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 4}
2022-06-17T18:52:29.564+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 66, "blockHeight": 66, "took": "12.51808ms"}
2022-06-17T18:52:44.558+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 67, "blockHeight": 67, "took": "1.563137ms"}
2022-06-17T18:55:21.549+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "ping/pong timeout", "peerCount": 3}
2022-06-17T18:55:21.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "ping/pong timeout", "peerCount": 2}
2022-06-17T18:55:21.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "ping/pong timeout", "peerCount": 1}
2022-06-17T18:55:21.550+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "ping/pong timeout", "peerCount": 0}
2022-06-17T18:55:21.553+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 1}
2022-06-17T18:55:21.554+0300	INFO	started protocol	{"addr": "127.0.0.1:20335", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 77, "id": 970555896}
2022-06-17T18:55:24.554+0300	INFO	new peer connected	{"addr": "172.200.0.4:20333", "peerCount": 2}
2022-06-17T18:55:24.555+0300	INFO	new peer connected	{"addr": "172.200.0.3:20334", "peerCount": 3}
2022-06-17T18:55:24.555+0300	INFO	new peer connected	{"addr": "10.78.13.84:59876", "peerCount": 4}
2022-06-17T18:55:24.555+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 5}
2022-06-17T18:55:24.556+0300	INFO	new peer connected	{"addr": "172.200.0.254:20332", "peerCount": 6}
2022-06-17T18:55:24.556+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 7}
2022-06-17T18:55:24.556+0300	INFO	started protocol	{"addr": "172.200.0.4:20333", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 76, "id": 3444438498}
2022-06-17T18:55:24.556+0300	INFO	new peer connected	{"addr": "172.200.0.1:20335", "peerCount": 8}
2022-06-17T18:55:24.558+0300	INFO	started protocol	{"addr": "127.0.0.1:20336", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 77, "id": 1475228436}
2022-06-17T18:55:24.559+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 9}
2022-06-17T18:55:24.558+0300	INFO	started protocol	{"addr": "172.200.0.3:20334", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 77, "id": 2435677826}
2022-06-17T18:55:24.559+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 10}
2022-06-17T18:55:24.559+0300	WARN	peer disconnected	{"addr": "172.200.0.1:20335", "error": "unexpected empty payload: CMDVersion", "peerCount": 9}
2022-06-17T18:55:24.559+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 10}
2022-06-17T18:55:24.560+0300	INFO	new peer connected	{"addr": "172.200.0.2:20336", "peerCount": 11}
2022-06-17T18:55:24.560+0300	WARN	peer disconnected	{"addr": "172.200.0.254:20332", "error": "identical node id", "peerCount": 10}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "already connected", "peerCount": 9}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 10}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "10.78.13.84:59876", "error": "unexpected empty payload: CMDVersion", "peerCount": 9}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 8}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 9}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "unexpected empty payload: CMDVersion", "peerCount": 8}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 9}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "unexpected empty payload: CMDVersion", "peerCount": 8}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "172.200.0.2:20336", "error": "unexpected empty payload: CMDVersion", "peerCount": 7}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 8}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 9}
2022-06-17T18:55:24.561+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 8}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 9}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 10}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 11}
2022-06-17T18:55:24.561+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 12}
2022-06-17T18:55:24.562+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "already connected", "peerCount": 11}
2022-06-17T18:55:24.562+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 12}
2022-06-17T18:55:24.562+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 13}
2022-06-17T18:55:24.562+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 12}
2022-06-17T18:55:24.562+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 11}
2022-06-17T18:55:24.562+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 10}
2022-06-17T18:55:24.562+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "unexpected empty payload: CMDVersion", "peerCount": 9}
2022-06-17T18:55:24.563+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 10}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 9}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "unexpected empty payload: CMDVersion", "peerCount": 8}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 7}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "max peers reached", "peerCount": 6}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 5}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "max peers reached", "peerCount": 4}
2022-06-17T18:55:24.563+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 3}
2022-06-17T18:57:21.551+0300	WARN	peer disconnected	{"addr": "172.200.0.4:20333", "error": "ping/pong timeout", "peerCount": 2}
2022-06-17T18:57:21.552+0300	WARN	peer disconnected	{"addr": "172.200.0.3:20334", "error": "ping/pong timeout", "peerCount": 1}
2022-06-17T18:57:21.552+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "ping/pong timeout", "peerCount": 0}
2022-06-17T18:57:21.553+0300	INFO	new peer connected	{"addr": "172.200.0.4:20333", "peerCount": 1}
2022-06-17T18:57:21.554+0300	INFO	new peer connected	{"addr": "10.78.13.84:20332", "peerCount": 2}
2022-06-17T18:57:21.555+0300	INFO	started protocol	{"addr": "172.200.0.4:20333", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 82, "id": 3444438498}
2022-06-17T18:57:21.556+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 3}
2022-06-17T18:57:21.556+0300	INFO	new peer connected	{"addr": "10.78.13.84:46076", "peerCount": 4}
2022-06-17T18:57:21.556+0300	INFO	new peer connected	{"addr": "172.200.0.1:20335", "peerCount": 5}
2022-06-17T18:57:21.556+0300	INFO	new peer connected	{"addr": "172.200.0.254:20332", "peerCount": 6}
2022-06-17T18:57:21.556+0300	INFO	new peer connected	{"addr": "10.78.13.84:59972", "peerCount": 7}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 8}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 9}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "172.200.0.2:20336", "peerCount": 10}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 11}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 12}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "172.200.0.3:20334", "peerCount": 13}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 14}
2022-06-17T18:57:21.557+0300	INFO	started protocol	{"addr": "127.0.0.1:20334", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 82, "id": 2435677826}
2022-06-17T18:57:21.557+0300	WARN	peer disconnected	{"addr": "172.200.0.2:20336", "error": "max peers reached", "peerCount": 13}
2022-06-17T18:57:21.557+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 14}
2022-06-17T18:57:21.558+0300	INFO	started protocol	{"addr": "172.200.0.1:20335", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 82, "id": 970555896}
2022-06-17T18:57:21.558+0300	WARN	peer disconnected	{"addr": "172.200.0.254:20332", "error": "identical node id", "peerCount": 13}
2022-06-17T18:57:21.558+0300	INFO	new peer connected	{"addr": "127.0.0.1:20334", "peerCount": 14}
2022-06-17T18:57:21.558+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "max peers reached", "peerCount": 13}
2022-06-17T18:57:21.558+0300	WARN	peer disconnected	{"addr": "10.78.13.84:46076", "error": "identical node id", "peerCount": 12}
2022-06-17T18:57:21.558+0300	INFO	new peer connected	{"addr": "127.0.0.1:20333", "peerCount": 13}
2022-06-17T18:57:21.558+0300	INFO	new peer connected	{"addr": "127.0.0.1:20335", "peerCount": 14}
2022-06-17T18:57:21.558+0300	INFO	new peer connected	{"addr": "127.0.0.1:20336", "peerCount": 15}
2022-06-17T18:57:21.558+0300	WARN	peer disconnected	{"addr": "10.78.13.84:59972", "error": "identical node id", "peerCount": 14}
2022-06-17T18:57:21.558+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 13}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "10.78.13.84:20332", "error": "unexpected empty payload: CMDVersion", "peerCount": 12}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 11}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "172.200.0.3:20334", "error": "unexpected empty payload: CMDVersion", "peerCount": 10}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "unexpected empty payload: CMDVersion", "peerCount": 9}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20334", "error": "already connected", "peerCount": 8}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "unexpected empty payload: CMDVersion", "peerCount": 7}
2022-06-17T18:57:21.559+0300	INFO	started protocol	{"addr": "127.0.0.1:20336", "userAgent": "/NEO-GO:0.99.1-pre-53-g7ccb646e/", "startHeight": 82, "id": 1475228436}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20333", "error": "already connected", "peerCount": 6}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20335", "error": "already connected", "peerCount": 5}
2022-06-17T18:57:21.559+0300	WARN	peer disconnected	{"addr": "127.0.0.1:20336", "error": "already connected", "peerCount": 4}
2022-06-17T18:58:21.561+0300	INFO	persisted to disk	{"blocks": 1, "keys": 20, "headerHeight": 68, "blockHeight": 68, "took": "5m21.993873018s"}
2022-06-17T18:58:21.563+0300	INFO	persisted to disk	{"blocks": 8, "keys": 111, "headerHeight": 76, "blockHeight": 76, "took": "2.243347ms"}
2022-06-17T18:58:22.567+0300	INFO	persisted to disk	{"blocks": 10, "keys": 135, "headerHeight": 86, "blockHeight": 86, "took": "5.637669ms"}
2022-06-17T18:58:25.565+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 87, "blockHeight": 87, "took": "1.879912ms"}
2022-06-17T18:58:40.572+0300	INFO	persisted to disk	{"blocks": 1, "keys": 20, "headerHeight": 88, "blockHeight": 88, "took": "1.560317ms"}
2022-06-17T18:58:55.579+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 89, "blockHeight": 89, "took": "1.925225ms"}
2022-06-17T18:59:10.587+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 90, "blockHeight": 90, "took": "3.118073ms"}
2022-06-17T18:59:25.592+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 91, "blockHeight": 91, "took": "1.607248ms"}
2022-06-17T18:59:40.600+0300	INFO	persisted to disk	{"blocks": 1, "keys": 20, "headerHeight": 92, "blockHeight": 92, "took": "931.806µs"}
2022-06-17T18:59:55.610+0300	INFO	persisted to disk	{"blocks": 1, "keys": 19, "headerHeight": 93, "blockHeight": 93, "took": "2.019041ms"}

```
Author: Anna Shaleva
Date:   2022-06-20 18:20:57 +03:00
Parent: cbd20eb959
Commit: b5d39a3ffd

5 changed files with 244 additions and 71 deletions

@@ -138,6 +138,7 @@ RPC:
Port: 10332
SessionEnabled: false
SessionExpirationTime: 60
SessionBackedByMPT: false
StartWhenSynchronized: false
TLSConfig:
Address: ""
@@ -167,10 +168,28 @@ where:
`terminatesession` JSON-RPC calls will be handled by the server. It is not
recommended to enable this setting for public RPC servers due to possible DoS
attack. Set to `false` by default. If `false`, iterators are expanded into a
set of values (see `MaxIteratorResultItems` setting).
set of values (see `MaxIteratorResultItems` setting). Implementation note:
when BoltDB is used as the node's backend DB, enabling iterator sessions may
cause blockchain persist delays of up to 2*`SessionExpirationTime` seconds at
early stages of the blockchain lifetime, while the DB is still relatively
small. This can happen due to BoltDB's re-mmapping behaviour. If regular
persist is a critical requirement, we recommend either decreasing
`SessionExpirationTime` or enabling `SessionBackedByMPT`; see the
`SessionBackedByMPT` documentation for more details.
- `SessionExpirationTime` is the lifetime of an iterator session in seconds. It is set
to `60` seconds by default and is relevant only if `SessionEnabled` is set to
`true`.
- `SessionBackedByMPT` is a flag that forces the JSON-RPC server to use
MPT-backed storage for delayed iterator traversal. If `true`, iterator
resources acquired during `invoke*` calls are released immediately, and
further iterator traversal is performed over MPT-backed storage by retrieving
the iterator via a historical MPT-based `invoke*` re-run. Setting
`SessionBackedByMPT` to `true` significantly affects `traverseiterator` call
performance and doesn't allow iterator traversal over outdated or removed
states (see the `KeepOnlyLatestState` and `RemoveUntraceableBlocks` settings
documentation for details), thus it is not recommended to enable
`SessionBackedByMPT` needlessly. `SessionBackedByMPT` is set to `false` by
default and is relevant only if `SessionEnabled` is set to `true`.
- `StartWhenSynchronized` controls when the RPC server is started; by default
(`false`) it starts immediately and RPC is available during node
synchronization. Setting it to `true` makes the node start the RPC service only
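
To tie the options above together, here is a minimal sketch of an `RPC`
config section with MPT-backed sessions enabled (values are illustrative
only, not recommendations):
```
RPC:
  Port: 10332
  MaxIteratorResultItems: 100
  SessionEnabled: true
  SessionExpirationTime: 15
  SessionBackedByMPT: true
```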

@@ -10,6 +10,8 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/storage"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
@@ -29,9 +31,20 @@ type Invoke struct {
Session uuid.UUID
finalize func()
onNewSession OnNewSession
// invocationParams is non-nil iff MPT-based iterator sessions are supported.
invocationParams *InvocationParams
}
type OnNewSession func(sessionID string, iterators []ServerIterator, finalize func())
type OnNewSession func(sessionID string, iterators []IteratorIdentifier, params *InvocationParams, finalize func())
// InvocationParams is a set of parameters used for invoke* calls.
type InvocationParams struct {
Trigger trigger.Type
Script []byte
ContractScriptHash util.Uint160
Transaction *transaction.Transaction
NextBlockHeight uint32
}
// InvokeDiag is an additional diagnostic data for invocation.
type InvokeDiag struct {
@@ -40,7 +53,7 @@ type InvokeDiag struct {
}
// NewInvoke returns a new Invoke structure with the given fields set.
func NewInvoke(ic *interop.Context, script []byte, faultException string, registerSession OnNewSession, maxIteratorResultItems int) *Invoke {
func NewInvoke(ic *interop.Context, script []byte, faultException string, registerSession OnNewSession, maxIteratorResultItems int, params *InvocationParams) *Invoke {
var diag *InvokeDiag
tree := ic.VM.GetInvocationTree()
if tree != nil {
@@ -64,6 +77,7 @@ func NewInvoke(ic *interop.Context, script []byte, faultException string, regist
finalize: ic.Finalize,
onNewSession: registerSession,
maxIteratorResultItems: maxIteratorResultItems,
invocationParams: params,
}
}
@@ -105,10 +119,13 @@ type Iterator struct {
Truncated bool
}
// ServerIterator represents Iterator on the server side. It is not for Client usage.
type ServerIterator struct {
ID string
// IteratorIdentifier represents Iterator identifier on the server side. It is not for Client usage.
type IteratorIdentifier struct {
ID string
// Item represents Iterator stackitem. It is nil if SessionBackedByMPT is set to true.
Item stackitem.Item
// StackIndex represents Iterator stackitem index on the stack. It is valid iff Item is nil.
StackIndex int
}
// Finalize releases resources occupied by Iterators created at the script invocation.
@@ -129,7 +146,7 @@ func (r Invoke) MarshalJSON() ([]byte, error) {
arr = make([]json.RawMessage, len(r.Stack))
sessionsEnabled = r.onNewSession != nil
sessionID string
iterators []ServerIterator
iterators []IteratorIdentifier
)
if len(r.FaultException) != 0 {
faultSep = " / "
@@ -149,10 +166,13 @@ arrloop:
r.FaultException += fmt.Sprintf("%sjson error: failed to marshal iterator: %v", faultSep, err)
break
}
iterators = append(iterators, ServerIterator{
ID: iteratorID,
Item: r.Stack[i],
})
ident := IteratorIdentifier{ID: iteratorID}
if r.invocationParams == nil {
ident.Item = r.Stack[i]
} else {
ident.StackIndex = i
}
iterators = append(iterators, ident)
} else {
iteratorValues, truncated := iterator.ValuesTruncated(r.Stack[i], r.maxIteratorResultItems)
value := make([]json.RawMessage, len(iteratorValues))
@@ -185,11 +205,17 @@ arrloop:
if sessionsEnabled && len(iterators) != 0 {
sessionID = uuid.NewString()
r.onNewSession(sessionID, iterators, r.Finalize)
if r.invocationParams == nil {
r.onNewSession(sessionID, iterators, nil, r.Finalize)
} else {
// Call finalizer manually if MPT-based iterator sessions are enabled.
defer r.Finalize()
r.onNewSession(sessionID, iterators, r.invocationParams, nil)
}
} else {
// Call finalizer manually if iterators are disabled or there's no iterator on stack.
defer r.Finalize()
}
if err == nil {
st, err = json.Marshal(arr)
if err != nil {

@@ -19,6 +19,7 @@ type (
Port uint16 `yaml:"Port"`
SessionEnabled bool `yaml:"SessionEnabled"`
SessionExpirationTime int `yaml:"SessionExpirationTime"`
SessionBackedByMPT bool `yaml:"SessionBackedByMPT"`
StartWhenSynchronized bool `yaml:"StartWhenSynchronized"`
TLSConfig TLSConfig `yaml:"TLSConfig"`
}

@@ -5,7 +5,10 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"math/big"
"net/http"
"net/http/httptest"
"sort"
"strings"
"sync"
@@ -23,6 +26,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/network"
"github.com/nspcc-dev/neo-go/pkg/rpc/client"
"github.com/nspcc-dev/neo-go/pkg/rpc/client/nns"
"github.com/nspcc-dev/neo-go/pkg/rpc/response/result"
@@ -1260,47 +1264,113 @@ func TestClient_InvokeAndPackIteratorResults(t *testing.T) {
}
}
func TestClient_IteratorFromInvocation(t *testing.T) {
chain, rpcSrv, httpSrv := initClearServerWithServices(t, false, false, true)
for _, b := range getTestBlocks(t) {
require.NoError(t, chain.AddBlock(b))
}
defer chain.Close()
defer rpcSrv.Shutdown()
c, err := client.New(context.Background(), httpSrv.URL, client.Options{})
require.NoError(t, err)
require.NoError(t, c.Init())
func TestClient_Iterator_SessionConfigVariations(t *testing.T) {
var expected [][]byte
storageHash, err := util.Uint160DecodeStringLE(storageContractHash)
require.NoError(t, err)
// storageItemsCount is the amount of storage items stored in Storage contract, it's hard-coded in the contract code.
const storageItemsCount = 255
expected := make([][]byte, storageItemsCount)
for i := 0; i < storageItemsCount; i++ {
expected[i] = stackitem.NewBigInteger(big.NewInt(int64(i))).Bytes()
}
sort.Slice(expected, func(i, j int) bool {
if len(expected[i]) != len(expected[j]) {
return len(expected[i]) < len(expected[j])
}
return bytes.Compare(expected[i], expected[j]) < 0
})
res, err := c.InvokeFunction(storageHash, "iterateOverValues", []smartcontract.Parameter{}, nil)
require.NoError(t, err)
require.NotEmpty(t, res.Session)
require.Equal(t, 1, len(res.Stack))
require.Equal(t, stackitem.InteropT, res.Stack[0].Type())
iterator, ok := res.Stack[0].Value().(result.Iterator)
require.True(t, ok)
require.Empty(t, iterator.ID)
require.NotEmpty(t, iterator.Values)
require.True(t, iterator.Truncated)
require.Equal(t, rpcSrv.config.MaxIteratorResultItems, len(iterator.Values))
for i := 0; i < rpcSrv.config.MaxIteratorResultItems; i++ {
// According to the Storage contract code.
require.Equal(t, expected[i], iterator.Values[i].Value().([]byte), i)
checkSessionEnabled := func(t *testing.T, c *client.Client) {
// We expect an Iterator with the designated ID to be present on stack. It should be possible to retrieve its values via `traverseiterator` call.
res, err := c.InvokeFunction(storageHash, "iterateOverValues", []smartcontract.Parameter{}, nil)
require.NoError(t, err)
require.NotEmpty(t, res.Session)
require.Equal(t, 1, len(res.Stack))
require.Equal(t, stackitem.InteropT, res.Stack[0].Type())
iterator, ok := res.Stack[0].Value().(result.Iterator)
require.True(t, ok)
require.NotEmpty(t, iterator.ID)
require.Empty(t, iterator.Values)
max := 84
actual, err := c.TraverseIterator(res.Session, *iterator.ID, max)
require.NoError(t, err)
require.Equal(t, max, len(actual))
for i := 0; i < max; i++ {
// According to the Storage contract code.
require.Equal(t, expected[i], actual[i].Value().([]byte), i)
}
}
t.Run("default sessions enabled", func(t *testing.T) {
chain, rpcSrv, httpSrv := initClearServerWithServices(t, false, false, false)
defer chain.Close()
defer rpcSrv.Shutdown()
for _, b := range getTestBlocks(t) {
require.NoError(t, chain.AddBlock(b))
}
c, err := client.New(context.Background(), httpSrv.URL, client.Options{})
require.NoError(t, err)
require.NoError(t, c.Init())
// Fill in expected stackitems set during the first test.
expected = make([][]byte, storageItemsCount)
for i := 0; i < storageItemsCount; i++ {
expected[i] = stackitem.NewBigInteger(big.NewInt(int64(i))).Bytes()
}
sort.Slice(expected, func(i, j int) bool {
if len(expected[i]) != len(expected[j]) {
return len(expected[i]) < len(expected[j])
}
return bytes.Compare(expected[i], expected[j]) < 0
})
checkSessionEnabled(t, c)
})
t.Run("MPT-based sessions enables", func(t *testing.T) {
// Prepare MPT-enabled RPC server.
chain, orc, cfg, logger := getUnitTestChainWithCustomConfig(t, false, false, func(cfg *config.Config) {
cfg.ApplicationConfiguration.RPC.SessionEnabled = true
cfg.ApplicationConfiguration.RPC.SessionBackedByMPT = true
})
serverConfig := network.NewServerConfig(cfg)
serverConfig.UserAgent = fmt.Sprintf(config.UserAgentFormat, "0.98.6-test")
serverConfig.Port = 0
server, err := network.NewServer(serverConfig, chain, chain.GetStateSyncModule(), logger)
require.NoError(t, err)
errCh := make(chan error, 2)
rpcSrv := New(chain, cfg.ApplicationConfiguration.RPC, server, orc, logger, errCh)
rpcSrv.Start()
handler := http.HandlerFunc(rpcSrv.handleHTTPRequest)
httpSrv := httptest.NewServer(handler)
defer chain.Close()
defer rpcSrv.Shutdown()
for _, b := range getTestBlocks(t) {
require.NoError(t, chain.AddBlock(b))
}
c, err := client.New(context.Background(), httpSrv.URL, client.Options{})
require.NoError(t, err)
require.NoError(t, c.Init())
checkSessionEnabled(t, c)
})
t.Run("sessions disabled", func(t *testing.T) {
chain, rpcSrv, httpSrv := initClearServerWithServices(t, false, false, true)
defer chain.Close()
defer rpcSrv.Shutdown()
for _, b := range getTestBlocks(t) {
require.NoError(t, chain.AddBlock(b))
}
c, err := client.New(context.Background(), httpSrv.URL, client.Options{})
require.NoError(t, err)
require.NoError(t, c.Init())
// We expect unpacked iterator values to be present on stack under InteropInterface cover.
res, err := c.InvokeFunction(storageHash, "iterateOverValues", []smartcontract.Parameter{}, nil)
require.NoError(t, err)
require.NotEmpty(t, res.Session)
require.Equal(t, 1, len(res.Stack))
require.Equal(t, stackitem.InteropT, res.Stack[0].Type())
iterator, ok := res.Stack[0].Value().(result.Iterator)
require.True(t, ok)
require.Empty(t, iterator.ID)
require.NotEmpty(t, iterator.Values)
require.True(t, iterator.Truncated)
require.Equal(t, rpcSrv.config.MaxIteratorResultItems, len(iterator.Values))
for i := 0; i < rpcSrv.config.MaxIteratorResultItems; i++ {
// According to the Storage contract code.
require.Equal(t, expected[i], iterator.Values[i].Value().([]byte), i)
}
})
}
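
For illustration, the session flow exercised by `checkSessionEnabled` can be
written as a standalone client-side helper. This is a sketch based on the
client API used in the tests above; the `TerminateSession` call is an
assumption about the session API of this release:
```
package sketch

import (
	"errors"

	"github.com/nspcc-dev/neo-go/pkg/rpc/client"
	"github.com/nspcc-dev/neo-go/pkg/rpc/response/result"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// traverseAll pages through an iterator returned by a contract method within
// a single server-side session and releases the session when done.
func traverseAll(c *client.Client, contract util.Uint160) ([]stackitem.Item, error) {
	res, err := c.InvokeFunction(contract, "iterateOverValues", []smartcontract.Parameter{}, nil)
	if err != nil {
		return nil, err
	}
	if len(res.Stack) == 0 {
		return nil, errors.New("empty invocation stack")
	}
	iter, ok := res.Stack[0].Value().(result.Iterator)
	if !ok || iter.ID == nil {
		return nil, errors.New("no live iterator on stack (sessions disabled?)")
	}
	var all []stackitem.Item
	for {
		items, err := c.TraverseIterator(res.Session, *iter.ID, 50)
		if err != nil {
			return nil, err
		}
		if len(items) == 0 {
			break // iterator exhausted
		}
		all = append(all, items...)
	}
	_, _ = c.TerminateSession(res.Session) // release server-side resources early
	return all, nil
}
```
Whether the session is backed by live DB resources or by MPT-based re-runs is
transparent to this code; only the server-side behaviour differs.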

@@ -93,10 +93,14 @@ type (
// session holds a set of iterators got after invoke* call with corresponding
// finalizer and session expiration time.
session struct {
iteratorsLock sync.Mutex
iterators []result.ServerIterator
timer *time.Timer
finalize func()
params *result.InvocationParams
iteratorsLock sync.Mutex
iteratorIdentifiers []result.IteratorIdentifier
// iterators stores the set of Iterator stackitems for the current session retrieved from MPT-backed storage.
// iterators is non-nil iff SessionBackedByMPT is enabled.
iterators []stackitem.Item
timer *time.Timer
finalize func()
}
)
@@ -1911,12 +1915,7 @@ func (s *Server) getFakeNextBlock(nextBlockHeight uint32) (*block.Block, error)
return b, nil
}
// runScriptInVM runs the given script in a new test VM and returns the invocation
// result. The script is either a simple script in case of `application` trigger,
// witness invocation script in case of `verification` trigger (it pushes `verify`
// arguments on stack before verification). In case of contract verification
// contractScriptHash should be specified.
func (s *Server) runScriptInVM(t trigger.Type, script []byte, contractScriptHash util.Uint160, tx *transaction.Transaction, b *block.Block, verbose bool) (*result.Invoke, *response.Error) {
func (s *Server) prepareInvocationContext(t trigger.Type, script []byte, contractScriptHash util.Uint160, tx *transaction.Transaction, b *block.Block, verbose bool) (*interop.Context, *response.Error) {
var (
err error
ic *interop.Context
@@ -1952,21 +1951,46 @@ func (s *Server) runScriptInVM(t trigger.Type, script []byte, contractScriptHash
} else {
ic.VM.LoadScriptWithFlags(script, callflag.All)
}
err = ic.VM.Run()
return ic, nil
}
// runScriptInVM runs the given script in a new test VM and returns the invocation
// result. The script is either a simple script in case of `application` trigger,
// witness invocation script in case of `verification` trigger (it pushes `verify`
// arguments on stack before verification). In case of contract verification
// contractScriptHash should be specified.
func (s *Server) runScriptInVM(t trigger.Type, script []byte, contractScriptHash util.Uint160, tx *transaction.Transaction, b *block.Block, verbose bool) (*result.Invoke, *response.Error) {
ic, respErr := s.prepareInvocationContext(t, script, contractScriptHash, tx, b, verbose)
if respErr != nil {
return nil, respErr
}
err := ic.VM.Run()
var faultException string
if err != nil {
faultException = err.Error()
}
var registerSession result.OnNewSession
var (
registerSession result.OnNewSession
params *result.InvocationParams
)
if s.config.SessionEnabled {
registerSession = s.registerSession
if s.config.SessionBackedByMPT {
params = &result.InvocationParams{
Trigger: t,
Script: script,
ContractScriptHash: contractScriptHash,
Transaction: tx,
NextBlockHeight: ic.Block.Index,
}
}
}
return result.NewInvoke(ic, script, faultException, registerSession, s.config.MaxIteratorResultItems), nil
return result.NewInvoke(ic, script, faultException, registerSession, s.config.MaxIteratorResultItems, params), nil
}
// registerSession is a callback used to add new iterator session to the sessions list.
// It performs no check whether sessions are enabled.
func (s *Server) registerSession(sessionID string, iterators []result.ServerIterator, finalize func()) {
func (s *Server) registerSession(sessionID string, iterators []result.IteratorIdentifier, params *result.InvocationParams, finalize func()) {
s.sessionsLock.Lock()
timer := time.AfterFunc(time.Second*time.Duration(s.config.SessionExpirationTime), func() {
s.sessionsLock.Lock()
@@ -1986,9 +2010,10 @@ func (s *Server) registerSession(sessionID string, iterators []result.ServerIter
sess.iteratorsLock.Unlock()
})
sess := &session{
iterators: iterators,
finalize: finalize,
timer: timer,
params: params,
iteratorIdentifiers: iterators,
finalize: finalize,
timer: timer,
}
s.sessions[sessionID] = sess
s.sessionsLock.Unlock()
@@ -2030,12 +2055,44 @@ func (s *Server) traverseIterator(reqParams request.Params) (interface{}, *respo
s.sessionsLock.Unlock()
var (
iIDStr = iID.String()
iVals []stackitem.Item
iIDStr = iID.String()
iVals []stackitem.Item
respErr *response.Error
)
for _, it := range session.iterators {
for i, it := range session.iteratorIdentifiers {
if iIDStr == it.ID {
iVals = iterator.Values(it.Item, count)
if it.Item != nil { // If Iterator stackitem is there, then use it to retrieve iterator elements.
iVals = iterator.Values(it.Item, count)
} else { // Otherwise, use MPT-backed historic call to retrieve and traverse iterator.
if len(session.iterators) == 0 {
var (
b *block.Block
ic *interop.Context
)
b, err = s.getFakeNextBlock(session.params.NextBlockHeight)
if err != nil {
session.iteratorsLock.Unlock()
return nil, response.NewInternalServerError(fmt.Sprintf("unable to prepare block for historic call: %s", err))
}
ic, respErr = s.prepareInvocationContext(session.params.Trigger, session.params.Script, session.params.ContractScriptHash, session.params.Transaction, b, false)
if respErr != nil {
session.iteratorsLock.Unlock()
return nil, respErr
}
_ = ic.VM.Run() // No error check because FAULTed invocations could also contain iterator on stack.
stack := ic.VM.Estack().ToArray()
for _, itID := range session.iteratorIdentifiers {
j := itID.StackIndex
if (stack[j].Type() != stackitem.InteropT) || !iterator.IsIterator(stack[j]) {
session.iteratorsLock.Unlock()
return nil, response.NewInternalServerError(fmt.Sprintf("inconsistent historic call result: expected %s, got %s at stack position #%d", stackitem.InteropT, stack[j].Type(), j))
}
session.iterators = append(session.iterators, stack[j])
}
session.finalize = ic.Finalize
}
iVals = iterator.Values(session.iterators[i], count)
}
break
}
}
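
The traversal logic above boils down to lazy, cached materialization of the
iterator stack. A simplified standalone sketch with hypothetical types
mirroring the `session` struct:
```
package sketch

import "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"

// lazySession re-creates iterator stackitems from a recorded invocation on
// the first traversal request and caches them for the rest of the session.
type lazySession struct {
	rerun     func() ([]stackitem.Item, error) // historic re-invocation of the script
	iterators []stackitem.Item                 // cache, filled on first use
}

// iterator returns the stackitem at the recorded stack index, materializing
// the whole stack on the first call.
func (s *lazySession) iterator(stackIndex int) (stackitem.Item, error) {
	if s.iterators == nil {
		stack, err := s.rerun()
		if err != nil {
			return nil, err
		}
		s.iterators = stack
	}
	return s.iterators[stackIndex], nil
}
```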