vm: fix OOM during nested structure cloning
The resulting item can't have more than MaxStackSize elements. Strictly speaking this only limits the number of cloned elements to MaxStackSize, but that is considered enough to mitigate the issue (the next size check happens when the result is pushed onto the stack). See neo-project/neo#2534, thanks @vang1ong7ang.
parent 1853d0c713
commit cfe41abd35
4 changed files with 59 additions and 9 deletions
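The mitigation follows a simple pattern: one element budget is threaded through the clone recursion by pointer, so every nested level draws from the same counter and the whole operation aborts once the budget is spent. The sketch below is a standalone illustration of that pattern only; the node and cloneLimited names are made up and are not part of the neo-go code.

package main

import (
	"errors"
	"fmt"
)

var errTooBig = errors.New("too big")

// node stands in for a nested container such as stackitem.Struct.
type node struct {
	children []*node
}

// cloneLimited copies n recursively. Every cloned child costs one unit
// of the shared budget; once *limit goes negative the clone is aborted.
func cloneLimited(n *node, limit *int) (*node, error) {
	ret := &node{children: make([]*node, len(n.children))}
	for i, c := range n.children {
		child, err := cloneLimited(c, limit)
		if err != nil {
			return nil, err
		}
		ret.children[i] = child
		*limit--
		if *limit < 0 {
			return nil, errTooBig
		}
	}
	return ret, nil
}

func main() {
	// Build a chain of 10 nested nodes.
	root := &node{}
	cur := root
	for i := 0; i < 10; i++ {
		next := &node{}
		cur.children = []*node{next}
		cur = next
	}

	limit := 100
	_, err := cloneLimited(root, &limit)
	fmt.Println(err, "budget left:", limit) // <nil> budget left: 90

	limit = 5 // too small for 10 nested elements
	_, err = cloneLimited(root, &limit)
	fmt.Println(err) // too big
}

The stackitem diff below applies the same idea to Struct.Clone, charging the budget for every cloned nested Struct element.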
@@ -298,17 +298,30 @@ func (i *Struct) Convert(typ Type) (Item, error) {
 
 // Clone returns a Struct with all Struct fields copied by value.
 // Array fields are still copied by reference.
-func (i *Struct) Clone() *Struct {
+func (i *Struct) Clone(limit int) (*Struct, error) {
+	return i.clone(&limit)
+}
+
+func (i *Struct) clone(limit *int) (*Struct, error) {
 	ret := &Struct{make([]Item, len(i.value))}
 	for j := range i.value {
 		switch t := i.value[j].(type) {
 		case *Struct:
-			ret.value[j] = t.Clone()
+			var err error
+
+			ret.value[j], err = t.clone(limit)
+			if err != nil {
+				return nil, err
+			}
+			*limit--
 		default:
 			ret.value[j] = t
 		}
+		if *limit < 0 {
+			return nil, ErrTooBig
+		}
 	}
-	return ret
+	return ret, nil
 }
 
 // Null represents null on the stack.
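The check counts cloned elements rather than the size of the source because Struct elements can be shared by reference: a script can repeatedly wrap two references to the same struct into a new one, keeping the in-memory representation tiny while a by-value deep copy grows exponentially (this is roughly the shape of the neo-project/neo#2534 report). A standalone back-of-the-envelope sketch, with an arbitrary depth, purely for illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Each level stores two references to the previous level, so only
	// depth+1 nodes ever exist in memory.
	const depth = 64

	// Elements a by-value clone would have to allocate:
	// clone(0) = 0, clone(k) = 2 + 2*clone(k-1)  =>  clone(depth) = 2^(depth+1) - 2.
	cloned := big.NewInt(0)
	two := big.NewInt(2)
	for i := 0; i < depth; i++ {
		cloned.Mul(cloned, two)
		cloned.Add(cloned, two)
	}

	fmt.Println("nodes in memory:", depth+1)                // 65
	fmt.Println("elements a naive clone creates:", cloned)  // 36893488147419103230
}

With the budget in place, such a clone now fails with ErrTooBig after a bounded number of allocations instead of exhausting memory.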
@@ -465,6 +465,15 @@ func TestNewVeryBigInteger(t *testing.T) {
 	check(false, new(big.Int).Mul(maxBitSet, big.NewInt(2)))
 }
 
+func TestStructClone(t *testing.T) {
+	st0 := Struct{}
+	st := Struct{value: []Item{&st0}}
+	_, err := st.Clone(1)
+	require.NoError(t, err)
+	_, err = st.Clone(0)
+	require.Error(t, err)
+}
+
 func TestDeepCopy(t *testing.T) {
 	testCases := []struct {
 		name string
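For illustration only, a deeper variant of the test above could check that the budget is charged once per nested Struct element along a chain. This is a hypothetical sketch, not part of the commit; it assumes it lives in the same package as TestStructClone, so the unexported value field and the already-imported require package are accessible:

func TestStructCloneDeepChain(t *testing.T) {
	// A chain of 100 nested structs: the outer struct contains one
	// Struct element, which contains one Struct element, and so on.
	inner := &Struct{}
	for i := 0; i < 100; i++ {
		inner = &Struct{value: []Item{inner}}
	}
	_, err := inner.Clone(100) // one budget unit per nested Struct element
	require.NoError(t, err)
	_, err = inner.Clone(99) // one unit short
	require.Error(t, err)
}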
pkg/vm/vm.go (22 changed lines)

@@ -1056,7 +1056,10 @@ func (v *VM) execute(ctx *Context, op opcode.Opcode, parameter []byte) (err erro
 		itemElem := v.estack.Pop()
 		arrElem := v.estack.Pop()
 
-		val := cloneIfStruct(itemElem.value)
+		val, err := cloneIfStruct(itemElem.value)
+		if err != nil {
+			panic(err)
+		}
 
 		switch t := arrElem.value.(type) {
 		case *stackitem.Array:
@@ -1370,12 +1373,19 @@ func (v *VM) execute(ctx *Context, op opcode.Opcode, parameter []byte) (err erro
 			src := t.Value().([]stackitem.Item)
 			arr = make([]stackitem.Item, len(src))
 			for i := range src {
-				arr[i] = cloneIfStruct(src[i])
+				arr[i], err = cloneIfStruct(src[i])
+				if err != nil {
+					panic(err)
+				}
 			}
 		case *stackitem.Map:
 			arr = make([]stackitem.Item, 0, t.Len())
 			for k := range t.Value().([]stackitem.MapElement) {
-				arr = append(arr, cloneIfStruct(t.Value().([]stackitem.MapElement)[k].Value))
+				elem, err := cloneIfStruct(t.Value().([]stackitem.MapElement)[k].Value)
+				if err != nil {
+					panic(err)
+				}
+				arr = append(arr, elem)
 			}
 		default:
 			panic("not a Map, Array or Struct")
@@ -1741,12 +1751,12 @@ func checkMultisig1(v *VM, curve elliptic.Curve, h []byte, pkeys [][]byte, sig [
 	return false
 }
 
-func cloneIfStruct(item stackitem.Item) stackitem.Item {
+func cloneIfStruct(item stackitem.Item) (stackitem.Item, error) {
 	switch it := item.(type) {
 	case *stackitem.Struct:
-		return it.Clone()
+		return it.Clone(MaxStackSize)
 	default:
-		return it
+		return it, nil
 	}
 }
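cloneIfStruct now returns an error and the opcode handlers above simply panic with it. The hunk headers show that execute has a named (err error) return, which suggests the panic is recovered further up and turned into a VM FAULT; the snippet below is a generic sketch of that pattern under this assumption, not the actual neo-go recovery code.

package main

import (
	"errors"
	"fmt"
)

var errTooBig = errors.New("too big")

// execute mimics an opcode loop whose helpers panic on fatal errors;
// the deferred recover converts the panic into the named error return.
func execute() (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("FAULT: %v", r)
		}
	}()

	// A handler (e.g. APPEND or VALUES) hits the clone limit and panics.
	panic(errTooBig)
}

func main() {
	fmt.Println(execute()) // FAULT: too big
}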
@@ -3,6 +3,7 @@ package vm
 import (
 	"bytes"
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"math"
@@ -2431,6 +2432,23 @@ func TestSLOTOpcodes(t *testing.T) {
 	})
 }
 
+func TestNestedStructClone(t *testing.T) {
+	progs := []string{
+		// VALUES for deeply nested structs, see neo-project/neo#2534.
+		"5601c501fe0360589d604a12c0db415824f7cd45",
+		// APPEND of deeply nested struct to empty array.
+		"5601c2c501fe0360589d604a12c0db415824f7cf45",
+		// VALUES for map with deeply nested struct.
+		"5601c84a11c501fe0060589d604a12c0db415824f7d0cd45",
+	}
+	for _, h := range progs {
+		prog, err := hex.DecodeString(h)
+		require.NoError(t, err)
+		vm := load(prog)
+		checkVMFailed(t, vm)
+	}
+}
+
 func makeProgram(opcodes ...opcode.Opcode) []byte {
 	prog := make([]byte, len(opcodes)+1) // RET
 	for i := 0; i < len(opcodes); i++ {