*: apply go 1.19 formatter heuristics

And make manual corrections where needed. See the "Common mistakes
and pitfalls" section of https://tip.golang.org/doc/comment.
Anna Shaleva 2022-08-08 13:23:21 +03:00
parent bb751535d3
commit 916f2293b8
20 changed files with 167 additions and 150 deletions

@@ -210,12 +210,13 @@ func lastStmtIsReturn(body *ast.BlockStmt) (b bool) {
// analyzePkgOrder sets the order in which packages should be processed.
// From Go spec:
//
// A package with no imports is initialized by assigning initial values to all its package-level variables
// followed by calling all init functions in the order they appear in the source, possibly in multiple files,
// as presented to the compiler. If a package has imports, the imported packages are initialized before
// initializing the package itself. If multiple packages import a package, the imported package
// will be initialized only once. The importing of packages, by construction, guarantees
// that there can be no cyclic initialization dependencies.
func (c *codegen) analyzePkgOrder() {
	seen := make(map[string]bool)
	info := c.buildInfo.program[0]
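
The variable-before-init() part of that quote is easy to see in plain Go; a minimal, self-contained illustration (not from this repository):

	package main

	import "fmt"

	var x = initX() // package-level variables are initialized first

	func initX() int {
		fmt.Println("1: package-level variable initialized")
		return 42
	}

	func init() {
		fmt.Println("2: init() runs after variable initialization")
	}

	func main() {
		fmt.Println("3: main() runs last, x =", x)
	}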

@@ -1341,19 +1341,21 @@ func (c *codegen) isCallExprSyscall(e ast.Expr) bool {
// processDefers emits code for `defer` statements.
// TRY-related opcodes handle exceptions as follows:
//  1. The CATCH block is executed only if an exception has occurred.
//  2. The FINALLY block is always executed, but after the CATCH block.
//
// Go `defer` statements are a bit different:
//  1. `defer` is always executed regardless of whether an exception has occurred.
//  2. `recover` may or may not handle a possible exception.
//
// Thus, we use the following approach:
//  1. A thrown exception is saved in a static field X, and a static field Y is set to true.
//  2. For each `defer` there is a dedicated local variable which is set to 1 if the `defer`
//     statement is encountered during actual execution.
//  3. CATCH and FINALLY blocks are the same, and both contain the same CALLs.
//  4. Right before the CATCH block, check the variable from (2). If it is null, jump to the end of the CATCH+FINALLY block.
//  5. In the CATCH block we set Y to true and emit default return values if it is the last defer.
//  6. Execute the FINALLY block only if Y is false.
func (c *codegen) processDefers() {
	for i := len(c.scope.deferStack) - 1; i >= 0; i-- {
		stmt := c.scope.deferStack[i]
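
For reference, a plain-Go sketch of the `defer`/`recover` behaviour that this TRY/CATCH/FINALLY mapping has to reproduce: the deferred function runs whether or not a panic occurred, and `recover` decides whether the panic is swallowed.

	package main

	import "fmt"

	func mayPanic(fail bool) (result string) {
		defer func() {
			// Runs regardless of whether a panic occurred.
			if r := recover(); r != nil {
				// recover may handle the exception...
				result = fmt.Sprintf("recovered: %v", r)
			}
			// ...or simply do nothing and let normal flow continue.
		}()
		if fail {
			panic("boom")
		}
		return "ok"
	}

	func main() {
		fmt.Println(mayPanic(false)) // ok
		fmt.Println(mayPanic(true))  // recovered: boom
	}
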
@@ -1399,10 +1401,10 @@ func (c *codegen) processDefers() {
// emitExplicitConvert handles `someType(someValue)` conversions between string/[]byte.
// Rules for conversion:
//  1. interop.* types are converted to ByteArray if not already.
//  2. Otherwise, convert between ByteArray/Buffer.
//  3. Rules for types which are not string/[]byte should already
//     be enforced by the Go parser.
func (c *codegen) emitExplicitConvert(from, to types.Type) {
	if isInteropPath(to.String()) {
		if isByteSlice(from) && !isString(from) {
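
The contract-side code that triggers this path is ordinary Go string/[]byte conversion; a small illustrative sketch (identifiers are made up, not taken from the compiler tests):

	package convdemo

	// Key builds a storage key; the []byte(...) conversion here is compiled
	// by emitExplicitConvert into a ByteArray/Buffer conversion.
	func Key(prefix string, id []byte) []byte {
		return append([]byte(prefix), id...)
	}

	// AsString goes the other way, []byte -> string.
	func AsString(raw []byte) string {
		return string(raw)
	}
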
@@ -1859,10 +1861,10 @@ func (c *codegen) convertBuiltin(expr *ast.CallExpr) {
// transformArgs returns a list of function arguments
// which should be put on the stack.
// There are special cases for builtins:
//  1. With FromAddress, parameter conversion happens at compile time,
//     so there is no need to push parameters on the stack and perform an actual call.
//  2. With panic, the generated code depends on whether the argument was nil or a string,
//     so it should be handled accordingly.
func transformArgs(fs *funcScope, fun ast.Expr, args []ast.Expr) []ast.Expr {
	switch f := fun.(type) {
	case *ast.SelectorExpr:

@@ -15,11 +15,12 @@ import (
// inlineCall inlines call of n for function represented by f.
// Call `f(a,b)` for definition `func f(x,y int)` is translated to block:
//
//	{
//		x := a
//		y := b
//		<inline body of f directly>
//	}
func (c *codegen) inlineCall(f *funcScope, n *ast.CallExpr) {
	offSz := len(c.inlineContext)
	c.inlineContext = append(c.inlineContext, inlineContextSingle{
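
Spelled out with a concrete (hypothetical) function, the translation described above amounts to:

	package inlinedemo

	func add(x, y int) int { return x + y }

	// caller compiles as if its body were the block below.
	func caller() int {
		return add(1, 2)
	}

	// callerInlined shows what the compiler effectively emits for caller():
	// the arguments become local assignments, then the body of add is pasted in.
	func callerInlined() int {
		x := 1
		y := 2
		return x + y
	}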

@@ -32,8 +32,8 @@ var (
// TestCreateBasicChain generates the "../rpc/testdata/testblocks.acc" file which
// contains data for RPC unit tests. It is also a nice integration test.
// To generate a new "../rpc/testdata/testblocks.acc", follow these steps:
//  1. Set saveChain below to true.
//  2. Run the tests with `$ make test`.
func TestCreateBasicChain(t *testing.T) {
	const saveChain = false

@@ -2,7 +2,7 @@
Package core implements Neo ledger functionality.
It's built around the Blockchain structure that maintains state of the ledger.

# Events

You can subscribe to Blockchain events using a set of Subscribe and Unsubscribe
methods. These methods accept channels that will be used to send appropriate
@@ -24,6 +24,5 @@ way they're stored in the block.
Be careful using these subscriptions, this mechanism is not intended to be used
by lots of subscribers and failing to read from event channels can affect
other Blockchain operations.
*/
package core
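
A rough sketch of that subscription flow; the SubscribeForBlocks/UnsubscribeFromBlocks names and the channel type are written from memory and should be treated as assumptions rather than the verified API:

	package main

	import (
		"fmt"

		"github.com/nspcc-dev/neo-go/pkg/core"
		"github.com/nspcc-dev/neo-go/pkg/core/block"
	)

	// watchBlocks drains the channel promptly, since failing to read from it
	// can affect other Blockchain operations (see the doc above).
	func watchBlocks(bc *core.Blockchain, done chan struct{}) {
		ch := make(chan *block.Block)
		bc.SubscribeForBlocks(ch) // assumed signature: SubscribeForBlocks(chan *block.Block)
		defer bc.UnsubscribeFromBlocks(ch)
		for {
			select {
			case b := <-ch:
				fmt.Println("new block:", b.Index)
			case <-done:
				return
			}
		}
	}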

@@ -21,12 +21,12 @@ var (
// Billet is a part of an MPT trie with missing hash nodes that need to be restored.
// Billet is based on the following assumptions:
//  1. Refcount can only be incremented (we don't change the MPT structure during restore,
//     thus don't need to decrease refcount).
//  2. Each time a part of a Billet is completely restored, it is collapsed into
//     HashNode.
//  3. Any pair (node, path) must be restored only once. It's the duty of an MPT pool to manage
//     MPT paths so that this assumption holds.
type Billet struct {
	TempStoragePrefix storage.KeyPrefix
	Store             *storage.MemCachedStore

@@ -37,15 +37,15 @@ func prepareMPTCompat() *Trie {
// TestCompatibility contains tests present in the C# implementation.
// https://github.com/neo-project/neo-modules/blob/master/tests/Neo.Plugins.StateService.Tests/MPT/UT_MPTTrie.cs
// There are some differences, though:
//  1. In our implementation, delete is silent, i.e. we do not return an error if the key is missing or empty.
//     However, we do return an error when the contents of the hash node are missing from the store,
//     and also when the key is too big (both correspond to exceptions in the C# implementation).
//  2. In our implementation, put returns an error if something goes wrong, while the C# implementation throws
//     an exception and returns nothing.
//  3. In our implementation, get does not immediately return an error in case of an empty key. An error is returned
//     only if the value is missing from the storage. The C# implementation checks that the key is not empty and throws
//     an error otherwise. However, if the key is too big, an error is returned (corresponds to an exception in the C# implementation).
func TestCompatibility(t *testing.T) {
	mainTrie := prepareMPTCompat()
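
Assuming the usual Put/Get/Delete signatures of mpt.Trie (an assumption, verify against the package), the listed differences look like this from a caller's perspective:

	package mptdemo

	import (
		"fmt"

		"github.com/nspcc-dev/neo-go/pkg/core/mpt"
	)

	// behaviorSketch is illustrative only, not part of the compatibility tests.
	func behaviorSketch(tr *mpt.Trie) {
		_ = tr.Put([]byte{0x12, 0x01}, []byte("val1")) // error only if something goes wrong
		if v, err := tr.Get([]byte{0x12, 0x01}); err == nil {
			fmt.Println(string(v))
		}
		_ = tr.Delete([]byte("missing")) // silent: no error for a missing key
	}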

@@ -4,11 +4,11 @@ Package mpt implements MPT (Merkle-Patricia Trie).
An MPT stores key-value pairs and is a trie over a 16-symbol alphabet. https://en.wikipedia.org/wiki/Trie
A trie is a tree where values are stored in leaves and keys are paths from the root to the leaf node.
An MPT consists of 4 types of nodes:
  - Leaf node only contains a value.
  - Extension node contains both a key and a value.
  - Branch node contains 2 or more children.
  - Hash node is a compressed node and only contains the actual node's hash.
    The actual node must be retrieved from the storage or over the network.

As an example here is a trie containing 3 pairs:
  - 0x1201 -> val1
@@ -16,18 +16,18 @@ As an example here is a trie containing 3 pairs:
  - 0x1224 -> val3
  - 0x12 -> val4

	ExtensionNode(0x0102), Next
	 _______________________|
	|
	BranchNode [0, 1, 2, ...], Last -> Leaf(val4)
	  |    |
	  |    ExtensionNode [0x04], Next -> Leaf(val3)
	  |
	BranchNode [0, 1, 2, 3, ...], Last -> HashNode(nil)
	  |    |
	  |    Leaf(val2)
	  |
	Leaf(val1)

There are 3 invariants that this implementation has:
  - Branch node cannot have <= 1 children

@@ -97,7 +97,8 @@ func getEffectiveSize(buf []byte, isNeg bool) int {
// ToBytes converts an integer to a slice in little-endian format.
// Note: NEO3 serialization differs from the default C# BigInteger.ToByteArray()
// when n == 0: in NEO3, zero is serialized as an empty slice.
//
// https://github.com/neo-project/neo-vm/blob/master/src/neo-vm/Types/Integer.cs#L16
func ToBytes(n *big.Int) []byte {
	return ToPreallocatedBytes(n, []byte{})
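
A quick sketch of the zero special case and the little-endian layout; the bigint import path is assumed, not verified:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/nspcc-dev/neo-go/pkg/encoding/bigint" // assumed home of ToBytes
	)

	func main() {
		fmt.Println(bigint.ToBytes(big.NewInt(0)))   // [] -- zero is an empty slice in NEO3
		fmt.Println(bigint.ToBytes(big.NewInt(258))) // [2 1] -- little-endian
	}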

@@ -8,6 +8,7 @@ in the documentation of respective functions.
Types defined here are used for proper manifest generation. Here is how Go types
correspond to smartcontract and VM types:

	int-like - Integer
	bool - Boolean
	[]byte - ByteArray (Buffer in VM)
@@ -15,8 +16,9 @@ correspond to smartcontract and VM types:
	(interface{})(nil) - Any
	non-byte slice - Array
	map[K]V - map

Other types are defined explicitly in this pkg:
[Hash160], [Hash256], [Interface], [PublicKey], [Signature].

Note that unless written otherwise, structures defined in this package can't be
correctly created by new() or composite literals, they should be received from

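Assuming the interop.Hash160 type from the interop package, a hypothetical contract method shows how these mappings end up in the manifest: owner becomes Hash160, key ByteArray, value Integer, extra Any, and the result Boolean.

	package mycontract

	import "github.com/nspcc-dev/neo-go/pkg/interop"

	// Store is purely illustrative; parameter types map to manifest types
	// exactly as the table above describes.
	func Store(owner interop.Hash160, key []byte, value int, extra interface{}) bool {
		return len(key) > 0 && value >= 0
	}
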
@@ -35,36 +35,35 @@ const MinimumResponseGas = 10_000_000
// Request makes an oracle request. It can only be successfully invoked by
// a deployed contract and it takes the following parameters:
//
//   - url
//     URL to fetch, only https and neofs URLs are supported like
//     https://example.com/some.json or
//     neofs:6pJtLUnGqDxE2EitZYLsDzsfTDVegD6BrRUn8QAFZWyt/5Cyxb3wrHDw5pqY63hb5otCSsJ24ZfYmsA8NAjtho2gr
//
//   - filter
//     JSONPath filter to process the result; if specified, it will be
//     applied to the data returned from HTTP/NeoFS and you'll only get
//     filtered data in your callback method.
//
//   - cb
//     name of the method that will process oracle data, it must be a method
//     of the same contract that invokes Request and it must have the following
//     signature for correct invocation:
//
//   - Method(url string, userData interface{}, code int, result []byte)
//     where url is the same url specified for Request, userData is anything
//     passed in the next parameter, code is the status of the reply and
//     result is the data returned from the request if any.
//
//   - userData
//     data to pass to the callback function.
//
//   - gasForResponse
//     GAS attached to this request for reply callback processing,
//     note that it's different from the oracle request price, this
//     GAS is used for oracle transaction's network and system fees,
//     so it should be enough to pay for reply data as well as
//     its processing.
func Request(url string, filter []byte, cb string, userData interface{}, gasForResponse int) {
	neogointernal.CallWithTokenNoRet(Hash, "request",
		int(contract.States|contract.AllowNotify),
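
Putting the parameters together, a hypothetical contract calling Request and receiving the reply could look like this; the import paths and callback wiring are assumptions based on the comment above, not a verified example:

	package oracledemo

	import (
		"github.com/nspcc-dev/neo-go/pkg/interop/native/oracle" // assumed import path
		"github.com/nspcc-dev/neo-go/pkg/interop/runtime"
	)

	// DoRequest asks the oracle for a JSON document; OracleCallback will be
	// invoked later with the result. Parameter values are illustrative only.
	func DoRequest() {
		filter := []byte("$.price")
		oracle.Request("https://example.com/some.json", filter, "oracleCallback", nil, oracle.MinimumResponseGas)
	}

	// OracleCallback matches the callback signature described in the doc comment.
	func OracleCallback(url string, userData interface{}, code int, result []byte) {
		runtime.Log("oracle reply for " + url)
	}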

@@ -46,11 +46,12 @@ func JSONSerialize(item interface{}) []byte {
// JSONDeserialize deserializes a value from json. It uses `jsonDeserialize` method of StdLib
// native contract.
// It performs deserialization as follows:
//
//	strings -> []byte (string) from base64
//	integers -> (u)int* types
//	null -> interface{}(nil)
//	arrays -> []interface{}
//	maps -> map[string]interface{}
func JSONDeserialize(data []byte) interface{} {
	return neogointernal.CallWithToken(Hash, "jsonDeserialize", int(contract.NoneFlag),
		data)
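
A minimal round-trip sketch using the JSONSerialize/JSONDeserialize pair shown in this hunk (the std import path is assumed):

	package jsondemo

	import "github.com/nspcc-dev/neo-go/pkg/interop/native/std" // assumed import path

	// RoundTrip decodes JSON and re-encodes it via the StdLib native contract.
	// For input like []byte(`{"count": 42}`) the intermediate value is a
	// map[string]interface{} with an integer under "count", per the mapping above.
	func RoundTrip(data []byte) []byte {
		v := std.JSONDeserialize(data)
		return std.JSONSerialize(v)
	}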

@@ -4,12 +4,13 @@ It can be used to implement unit-tests for contracts in Go using regular Go
conventions.

Usually it's used like this:

  - an instance of the blockchain is created using chain subpackage
  - the target contract is compiled using one of Compile* functions
  - and Executor is created for the blockchain
  - it's used to deploy a contract with DeployContract
  - CommitteeInvoker and/or ValidatorInvoker are then created to perform test invocations
  - if needed, NewAccount is used to create an appropriate number of accounts for the test

Higher-order methods provided in Executor and ContractInvoker hide the details
of transaction creation for the most part, but there are lower-level methods as

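A compressed sketch of that workflow; chain.NewSingle, NewExecutor, CompileFile, DeployContract and CommitteeInvoker are quoted from memory and should be treated as assumptions, with the contract path and method name purely hypothetical:

	package mycontract_test

	import (
		"testing"

		"github.com/nspcc-dev/neo-go/pkg/neotest"
		"github.com/nspcc-dev/neo-go/pkg/neotest/chain"
	)

	func TestMyContract(t *testing.T) {
		// Blockchain instance from the chain subpackage plus an Executor for it.
		bc, acc := chain.NewSingle(t)
		e := neotest.NewExecutor(t, bc, acc, acc)

		// Compile and deploy the contract under test.
		ctr := neotest.CompileFile(t, e.CommitteeHash, "mycontract.go", "mycontract.yml")
		e.DeployContract(t, ctr, nil)

		// Invoke it on behalf of the committee and check the result.
		inv := e.CommitteeInvoker(ctr.Hash)
		inv.Invoke(t, true, "myMethod", 42)
	}
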
@@ -1123,9 +1123,10 @@ func (s *Server) handleGetAddrCmd(p Peer) error {
// requestBlocks sends a CMDGetBlockByIndex message to the peer
// to sync up in blocks. A maximum of maxBlockBatch will be
// sent at once. There are two things we need to take care of:
//  1. If possible, blocks should be fetched in parallel.
//     height..+500 to one peer, height+500..+1000 to another etc.
//  2. Every block must eventually be fetched even if the peer sends no answer.
//
// Thus, the following algorithm is used:
//  1. Block range is divided into chunks of payload.MaxHashesCount.
//  2. Send requests for chunks in increasing order.
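
An illustrative (not the server's actual) chunking helper for step 1 of that algorithm, with a plain constant standing in for payload.MaxHashesCount:

	package main

	import "fmt"

	// chunkRange splits the [start, end) block-height range into chunks of at
	// most chunkSize heights, one chunk per request/peer.
	func chunkRange(start, end, chunkSize uint32) [][2]uint32 {
		var chunks [][2]uint32
		for h := start; h < end; h += chunkSize {
			to := h + chunkSize
			if to > end {
				to = end
			}
			chunks = append(chunks, [2]uint32{h, to})
		}
		return chunks
	}

	func main() {
		fmt.Println(chunkRange(1000, 2200, 500)) // [[1000 1500] [1500 2000] [2000 2200]]
	}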

@@ -2,7 +2,7 @@
Package rpcclient implements NEO-specific JSON-RPC 2.0 client.
This package is currently in beta and is subject to change.

# Client

After creating a client instance with or without a ClientConfig
you can interact with the NEO blockchain by its exposed methods.
@@ -12,6 +12,7 @@ return a more pretty printed response from the server instead of
a raw hex string.

TODO:

	Allow client to connect using client cert.
	More in-depth examples.
@@ -75,6 +76,5 @@ Unsupported methods
	sendfrom
	sendmany
	sendtoaddress
*/
package rpcclient
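
A minimal usage sketch; rpcclient.New, Options and GetBlockCount are written from memory, so treat the exact names and signatures as assumptions, and point the endpoint at your own node:

	package main

	import (
		"context"
		"fmt"

		"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	)

	func main() {
		c, err := rpcclient.New(context.Background(), "http://localhost:10332", rpcclient.Options{})
		if err != nil {
			panic(err)
		}
		height, err := c.GetBlockCount()
		if err != nil {
			panic(err)
		}
		fmt.Println("chain height:", height)
	}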

@@ -848,15 +848,16 @@ func getSigners(sender *wallet.Account, cosigners []SignerAccount) ([]transactio
// GAS should be deposited to the Notary contract.
// Main transaction should be constructed by the user. Several rules should be met for
// successful main transaction acceptance:
//  1. Native Notary contract should be a signer of the main transaction.
//  2. Notary signer should have None scope.
//  3. Main transaction should have dummy contract witness for Notary signer.
//  4. Main transaction should have NotaryAssisted attribute with NKeys specified.
//  5. NotaryAssisted attribute and dummy Notary witness (as well as the other incomplete witnesses)
//     should be paid for. Use CalculateNotaryWitness to calculate the amount of network fee to pay
//     for the attribute and Notary witness.
//  6. Main transaction either shouldn't have all witnesses attached (in this case none of them
//     can be multisignature), or it should only have a partial multisignature.
//
// Note: client should be initialized before SignAndPushP2PNotaryRequest call.
func (c *Client) SignAndPushP2PNotaryRequest(mainTx *transaction.Transaction, fallbackScript []byte, fallbackSysFee int64, fallbackNetFee int64, fallbackValidFor uint32, acc *wallet.Account) (*payload.P2PNotaryRequest, error) {
	var err error

@@ -141,18 +141,20 @@ func (pt *ParamType) DecodeBinary(r *io.BinReader) {
// ParseParamType is a user-friendly string to ParamType converter, it's
// case-insensitive and makes the following conversions:
//
//	signature -> SignatureType
//	bool, boolean -> BoolType
//	int, integer -> IntegerType
//	hash160 -> Hash160Type
//	hash256 -> Hash256Type
//	bytes, bytearray, filebytes -> ByteArrayType
//	key, publickey -> PublicKeyType
//	string -> StringType
//	array, struct -> ArrayType
//	map -> MapType
//	interopinterface -> InteropInterfaceType
//	void -> VoidType
//
// anything else generates an error.
func ParseParamType(typ string) (ParamType, error) {
	switch strings.ToLower(typ) {
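
A small usage sketch, assuming ParseParamType lives in the smartcontract package:

	package main

	import (
		"fmt"

		"github.com/nspcc-dev/neo-go/pkg/smartcontract" // assumed home of ParamType/ParseParamType
	)

	func main() {
		for _, s := range []string{"ByteArray", "publickey", "no-such-type"} {
			pt, err := smartcontract.ParseParamType(s)
			if err != nil {
				fmt.Println(s, "->", err)
				continue
			}
			fmt.Println(s, "->", pt) // case-insensitive: "ByteArray" and "bytearray" both work
		}
	}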

@@ -118,9 +118,10 @@ func (u Uint256) MarshalJSON() ([]byte, error) {
}

// CompareTo compares two Uint256 with each other. Possible output: 1, -1, 0
//
//	 1 implies u > other.
//	-1 implies u < other.
//	 0 implies u = other.
func (u Uint256) CompareTo(other Uint256) int { return bytes.Compare(u[:], other[:]) }

// EncodeBinary implements the io.Serializable interface.

@@ -237,8 +237,9 @@ func (s *Stack) RemoveAt(n int) Element {
// Dup duplicates and returns the element at position n.
// Dup is used for copying elements on the top of its own stack.
//
//	s.Push(s.Peek(0)) // will result in unexpected behavior.
//	s.Push(s.Dup(0)) // is the correct approach.
func (s *Stack) Dup(n int) Element {
	e := s.Peek(n)
	return Element{e.value.Dup()}
@@ -246,9 +247,10 @@ func (s *Stack) Dup(n int) Element {
// Iter iterates over all elements in the stack, starting from the top
// of the stack.
//
//	s.Iter(func(elem Element) {
//		// do something with the element.
//	})
func (s *Stack) Iter(f func(Element)) {
	for i := len(s.elems) - 1; i >= 0; i-- {
		f(s.elems[i])
@@ -257,9 +259,10 @@ func (s *Stack) Iter(f func(Element)) {
// IterBack iterates over all elements of the stack, starting from the bottom
// of the stack.
//
//	s.IterBack(func(elem Element) {
//		// do something with the element.
//	})
func (s *Stack) IterBack(f func(Element)) {
	for i := 0; i < len(s.elems); i++ {
		f(s.elems[i])

@@ -36,12 +36,13 @@ var ErrTooDeep = errors.New("too deep")
// ToJSON encodes Item to JSON.
// It behaves as follows:
//
//	ByteArray -> base64 string
//	BigInteger -> number
//	Bool -> bool
//	Null -> null
//	Array, Struct -> array
//	Map -> map with keys as UTF-8 bytes
func ToJSON(item Item) ([]byte, error) {
	seen := make(map[Item]sliceNoPointer, typicalNumOfItems)
	return toJSON(nil, seen, item)
@@ -153,12 +154,13 @@ func itemToJSONString(it Item) ([]byte, error) {
// FromJSON decodes an Item from JSON.
// It behaves as follows:
//
//	string -> ByteArray from base64
//	number -> BigInteger
//	bool -> Bool
//	null -> Null
//	array -> Array
//	map -> Map, keys are UTF-8
func FromJSON(data []byte, maxCount int) (Item, error) {
	d := decoder{
		Decoder: *json.NewDecoder(bytes.NewReader(data)),
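
A round-trip sketch based on the two signatures above; the Make and NewArray helpers are assumptions about the stackitem package:

	package main

	import (
		"fmt"

		"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	)

	func main() {
		item := stackitem.NewArray([]stackitem.Item{
			stackitem.Make(42),        // BigInteger -> number
			stackitem.Make(true),      // Bool -> bool
			stackitem.Make([]byte{1}), // ByteArray -> base64 string
		})

		data, err := stackitem.ToJSON(item)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(data)) // e.g. [42,true,"AQ=="]

		back, err := stackitem.FromJSON(data, 10) // maxCount limits the number of items
		if err != nil {
			panic(err)
		}
		fmt.Println(back.Type())
	}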