Merge pull request #19 from nspcc-dev/feat/avx_inline

Speed up AVX implementation
fyrchik 2019-10-17 17:53:41 +03:00 committed by GitHub
commit 3d96a71c03
6 changed files with 170 additions and 57 deletions


@@ -28,12 +28,14 @@ The example of how it works can be seen in tests.
 # Benchmarks
-## AVX vs AVX2 version
+## go vs AVX vs AVX2 version
 ```
-BenchmarkAVX-8                     500   3492019 ns/op  28.64 MB/s  64 B/op  4 allocs/op
-BenchmarkAVX2-8                    500   2752693 ns/op  36.33 MB/s  64 B/op  2 allocs/op
-BenchmarkAVX2Inline-8             1000   1877260 ns/op  53.27 MB/s  64 B/op  2 allocs/op
+BenchmarkSum/AVX_digest-8          308   3889484 ns/op  25.71 MB/s  5 allocs/op
+BenchmarkSum/AVXInline_digest-8    457   2455437 ns/op  40.73 MB/s  5 allocs/op
+BenchmarkSum/AVX2_digest-8         399   3031102 ns/op  32.99 MB/s  3 allocs/op
+BenchmarkSum/AVX2Inline_digest-8   602   2077719 ns/op  48.13 MB/s  3 allocs/op
+BenchmarkSum/PureGo_digest-8        68  17795480 ns/op   5.62 MB/s  5 allocs/op
 ```
 # Contributing


@@ -44,3 +44,29 @@ TEXT ·mulByteRightx2(SB),NOSPLIT,$0
 	VMOVDQA Y0, (AX)
 	RET
+
+// func mulBitRightx2(c00c10, c01c11 *[4]uint64, e *[2]uint64)
+TEXT ·mulBitRightx2(SB),NOSPLIT,$0
+	MOVQ c00c10+0(FP), AX
+	VMOVDQA (AX), Y0
+	MOVQ c01c11+8(FP), BX
+	VMOVDQA (BX), Y8
+
+	VPSLLQ $1, Y0, Y1
+	VPALIGNR $8, Y1, Y0, Y2
+	VPSRLQ $63, Y2, Y2
+	VPXOR Y1, Y2, Y2
+	VPSRLQ $63, Y1, Y3
+	VPSLLQ $63, Y3, Y3
+	VPUNPCKHQDQ Y3, Y3, Y3
+	VPXOR Y2, Y3, Y3
+	MOVQ e+16(FP), CX
+	VBROADCASTI128 (CX), Y2
+	VPXOR Y3, Y8, Y3
+	VPAND Y3, Y2, Y4
+	VPXOR Y4, Y0, Y8
+	VMOVDQA Y8, (BX)
+	VMOVDQA Y3, (AX)
+	RET
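Since the packed routine is easy to lose track of in YMM shuffles, here is a rough pure-Go model of what mulBitRightx2 computes for one data bit; the column update it performs matches the commented scalar version further down. Only the function signature comes from the diff; the limb layout of the packed columns and the GF(2^127) reduction polynomial x^127 + x^63 + 1 used in mul2 are assumptions, and mulBitRightx2Model is a hypothetical name.

```
package main

import "fmt"

// mul2 multiplies one field element (two little-endian uint64 limbs, 127 bits
// used) by x, reducing modulo x^127 + x^63 + 1 (assumed modulus).
func mul2(lo, hi uint64) (uint64, uint64) {
	t := hi >> 62                       // bit 126, which overflows after the shift
	hi = (hi<<1 | lo>>63) & (1<<63 - 1) // shift left across the limb boundary
	lo = lo<<1 ^ t ^ t<<63              // fold the overflow back: x^127 = x^63 + 1
	return lo, hi
}

// mulBitRightx2Model mirrors the packed routine: c00c10 holds the column
// (c00, c10), c01c11 holds (c01, c11), e is all-ones iff the current bit is 1.
func mulBitRightx2Model(c00c10, c01c11 *[4]uint64, e *[2]uint64) {
	old := *c00c10
	for i := 0; i < 4; i += 2 { // i=0: the c00/c01 pair, i=2: the c10/c11 pair
		lo, hi := mul2(c00c10[i], c00c10[i+1])                // c00 *= 2 (resp. c10)
		c00c10[i], c00c10[i+1] = lo^c01c11[i], hi^c01c11[i+1] // c00 += c01 (resp. c11)
		c01c11[i] = c00c10[i]&e[0] ^ old[i]                   // c01 = (c00 AND e) + old c00
		c01c11[i+1] = c00c10[i+1]&e[1] ^ old[i+1]             // c11 = (c10 AND e) + old c10
	}
}

func main() {
	col0 := [4]uint64{1, 0, 0, 0} // c00 = 1, c10 = 0: identity matrix columns
	col1 := [4]uint64{0, 0, 1, 0} // c01 = 0, c11 = 1
	ones := [2]uint64{^uint64(0), ^uint64(0)}
	mulBitRightx2Model(&col0, &col1, &ones) // one step with a set data bit
	fmt.Println(col0, col1)
}
```

The real routine keeps both columns in YMM registers for the whole update instead of going through memory per element, which is where the speed-up comes from.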


@@ -12,6 +12,30 @@
 	XORPD R2, TO  \
 	XORPD R3, TO
 
+#define mask(bit, src, tmp, to1, to2) \
+	MOVQ src, tmp  \
+	SHRQ bit, tmp  \
+	ANDQ $1, tmp   \
+	NEGQ tmp       \
+	MOVQ tmp, to1  \
+	VSHUFPS $0, to1, to1, to2
+	// VPBROADCASTB to1, to2
+	// Can't use VPBROADCASTB because it is AVX2 instruction
+	// https://software.intel.com/en-us/forums/intel-isa-extensions/topic/301461
+
+#define mulBit(bit) \
+	MOVUPD X0, X8              \
+	MOVUPD X2, X9              \
+	mul2(X0, X5, X6, X7)       \
+	VXORPD X1, X5, X0          \
+	mul2(X2, X5, X6, X7)       \
+	VXORPD X3, X5, X2          \
+	mask(bit, CX, DX, X6, X5)  \
+	VANDPD X0, X5, X1          \
+	XORPD X8, X1               \
+	VANDPD X2, X5, X3          \
+	XORPD X9, X3
+
 // func mulBitRight(c00, c01, c10, c11, e *[2]uint64)
 TEXT ·mulBitRight(SB),NOSPLIT,$0
 	MOVQ c00+0(FP), AX
@@ -25,63 +49,50 @@ TEXT ·mulBitRight(SB),NOSPLIT,$0
 	MOVQ c11+24(FP), DX
 	MOVUPD (DX), X3
-	// c00 *= 2
-	mul2(X0, X5, X6, X7)
-	MOVUPD X5, X0
-	// c00 += c01
-	XORPD X1, X0
+	mul2(X0, X5, X6, X7)  // c00 *= 2
+	VXORPD X5, X1, X0     // c00 += c01
+	mul2(X2, X5, X6, X7)  // c10 *= 2
+	VXORPD X3, X5, X2     // c10 += c11
+	MOVQ e+32(FP), CX
+	MOVUPD (CX), X5
+	VANDPD X0, X5, X1     // c01 = c00 + e
+	XORPD X8, X1          // c01 += X8 (old c00)
+	VANDPD X2, X5, X3     // c11 = c10 + e
+	XORPD X9, X3          // c11 += x9 (old c10)
 	MOVUPD X0, (AX)
-	MOVQ c10+16(FP), CX
-	// c10 *= 2
-	mul2(X2, X5, X6, X7)
-	MOVUPD X5, X2
-	// c10 += c11
-	XORPD X3, X2
+	MOVQ c10+16(FP), CX
 	MOVUPD X2, (CX)
-	MOVQ e+32(FP), AX
-	MOVUPD (AX), X5
-	// c01 = c00 + e
-	VANDPD X0, X5, X1
-	// c01 += X8 (old c00)
-	XORPD X8, X1
 	MOVUPD X1, (BX)
-	// c11 = c10 + e
-	VANDPD X2, X5, X3
-	// c11 += X9 (old c10)
-	XORPD X9, X3
 	MOVUPD X3, (DX)
 	RET
 
-// func mulBitRightx2(c00c10, c01c11 *[4]uint64, e *[2]uint64)
-TEXT ·mulBitRightx2(SB),NOSPLIT,$0
-	MOVQ c00c10+0(FP), AX
-	VMOVDQA (AX), Y0
-	MOVQ c01c11+8(FP), BX
-	VMOVDQA (BX), Y8
-
-	VPSLLQ $1, Y0, Y1
-	VPALIGNR $8, Y1, Y0, Y2
-	VPSRLQ $63, Y2, Y2
-	VPXOR Y1, Y2, Y2
-	VPSRLQ $63, Y1, Y3
-	VPSLLQ $63, Y3, Y3
-	VPUNPCKHQDQ Y3, Y3, Y3
-	VPXOR Y2, Y3, Y3
-	MOVQ e+16(FP), CX
-	VBROADCASTI128 (CX), Y2
-	VPXOR Y3, Y8, Y3
-	VPAND Y3, Y2, Y4
-	VPXOR Y4, Y0, Y8
-	VMOVDQA Y8, (BX)
-	VMOVDQA Y3, (AX)
+TEXT ·mulByteRight(SB),NOSPLIT,$0
+	MOVQ c00+0(FP), AX
+	MOVUPD (AX), X0
+	MOVQ c01+8(FP), BX
+	MOVUPD (BX), X1
+	MOVQ c10+16(FP), CX
+	MOVUPD (CX), X2
+	MOVQ c11+24(FP), DX
+	MOVUPD (DX), X3
+	MOVB b+32(FP), CX
+
+	mulBit($7)
+	mulBit($6)
+	mulBit($5)
+	mulBit($4)
+	mulBit($3)
+	mulBit($2)
+	mulBit($1)
+	mulBit($0)
+
+	MOVUPD X0, (AX)
+	MOVQ c10+16(FP), CX
+	MOVUPD X2, (CX)
+	MOVUPD X1, (BX)
+	MOVQ c11+24(FP), DX
+	MOVUPD X3, (DX)
 	RET
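The mask macro turns the selected bit of the data byte into an all-zeros/all-ones quadword (SHRQ, ANDQ $1, NEGQ) and broadcasts it, so the inlined mulBit steps need no branches. A small Go sketch of that per-byte driver follows, with mulBitRight kept as an empty stub standing in for the assembly routine above; the [2]uint64 view of a field element and the model package name are assumptions.

```
package gf127sketch

// mulBitRight stands in for the assembly routine above; the real body is the
// mulBit expansion, this empty stub only keeps the sketch compilable.
func mulBitRight(c00, c01, c10, c11, e *[2]uint64) {}

// mask mirrors the mask macro: bit i of src becomes all zeros or all ones.
func mask(bit int, src uint64) uint64 {
	return -(src >> uint(bit) & 1)
}

// mulByteRight feeds one message byte into the state, high bit first, exactly
// like the eight mulBit($7) ... mulBit($0) expansions in the assembly version.
func mulByteRight(c00, c01, c10, c11 *[2]uint64, b byte) {
	for bit := 7; bit >= 0; bit-- {
		m := mask(bit, uint64(b))
		e := [2]uint64{m, m} // broadcast to 128 bits, like VSHUFPS does
		mulBitRight(c00, c01, c10, c11, &e)
	}
}
```

Processing a whole byte per assembly call, instead of one exported call per bit, is what removes most of the call and load/store overhead that the old AVX path paid.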

tz/avx_inline.go (new file, 65 lines)

@@ -0,0 +1,65 @@
// Copyright 2018 (c) NSPCC
//
// This file contains AVX implementation.
package tz

import (
	"hash"
)

type digest4 struct {
	x [4]GF127
}

// type assertion
var _ hash.Hash = (*digest4)(nil)

func newAVXInline() *digest4 {
	d := new(digest4)
	d.Reset()
	return d
}

func (d *digest4) Sum(in []byte) []byte {
	// Make a copy of d so that caller can keep writing and summing.
	d0 := *d
	h := d0.checkSum()
	return append(in, h[:]...)
}

func (d *digest4) checkSum() [hashSize]byte {
	return d.byteArray()
}

func (d *digest4) byteArray() (b [hashSize]byte) {
	copy(b[:], d.x[0].ByteArray())
	copy(b[16:], d.x[1].ByteArray())
	copy(b[32:], d.x[2].ByteArray())
	copy(b[48:], d.x[3].ByteArray())
	return
}

func (d *digest4) Reset() {
	d.x[0] = GF127{1, 0}
	d.x[1] = GF127{0, 0}
	d.x[2] = GF127{0, 0}
	d.x[3] = GF127{1, 0}
}

func (d *digest4) Write(data []byte) (n int, err error) {
	n = len(data)
	for _, b := range data {
		mulByteRight(&d.x[0], &d.x[1], &d.x[2], &d.x[3], b)
	}
	return
}

func (d *digest4) Size() int {
	return hashSize
}

func (d *digest4) BlockSize() int {
	return hashBlockSize
}

func mulByteRight(c00, c01, c10, c11 *GF127, b byte)
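A quick usage sketch of the new digest through the public API. The import path is assumed from the repository name; NewWith, AVXInline and the hash.Hash behaviour of digest4 are what this file and the dispatch changes below define.

```
package main

import (
	"encoding/hex"
	"fmt"
	"hash"

	"github.com/nspcc-dev/tzhash/tz" // import path assumed from the repository name
)

func main() {
	// Explicitly pick the implementation added in this commit
	// (requires an AVX-capable CPU).
	var h hash.Hash = tz.NewWith(tz.AVXInline)

	// Sum works on a copy of the state, so writing can continue afterwards.
	_, _ = h.Write([]byte("hello "))
	first := h.Sum(nil)
	_, _ = h.Write([]byte("world"))
	second := h.Sum(nil)

	fmt.Println(hex.EncodeToString(first))  // digest of "hello "
	fmt.Println(hex.EncodeToString(second)) // digest of "hello world"
	fmt.Println(h.Size(), h.BlockSize())    // hashSize (64) and hashBlockSize
}
```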


@@ -21,6 +21,7 @@ const (
 	AVX2
 	AVX2Inline
 	PureGo
+	AVXInline
 )
 
 var (
@@ -34,6 +35,8 @@ func (impl Implementation) String() string {
 	switch impl {
 	case AVX:
 		return "AVX"
+	case AVXInline:
+		return "AVXInline"
 	case AVX2:
 		return "AVX2"
 	case AVX2Inline:
@@ -49,6 +52,8 @@ func NewWith(impl Implementation) hash.Hash {
 	switch impl {
 	case AVX:
 		return newAVX()
+	case AVXInline:
+		return newAVXInline()
 	case AVX2:
 		return newAVX2()
 	case AVX2Inline:
@@ -65,7 +70,7 @@ func New() hash.Hash {
 	if hasAVX2 {
 		return newAVX2Inline()
 	} else if hasAVX {
-		return newAVX()
+		return newAVXInline()
 	} else {
 		return newPure()
 	}
@@ -78,7 +83,7 @@ func Sum(data []byte) [hashSize]byte {
 		_, _ = d.Write(data) // no errors
 		return d.checkSum()
 	} else if hasAVX {
-		d := newAVX()
+		d := newAVXInline()
 		_, _ = d.Write(data) // no errors
 		return d.checkSum()
 	} else {
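The net effect of the dispatch change: on a CPU with AVX but without AVX2, New() and Sum() now go through the inlined AVX path instead of the old per-bit one. A short sketch of the two entry points side by side; the import path is assumed as above, and the equality holds because every implementation computes the same hash, as the tests below check.

```
package main

import (
	"bytes"
	"fmt"

	"github.com/nspcc-dev/tzhash/tz" // import path assumed from the repository name
)

func main() {
	data := []byte("payload")

	// One-shot API: picks AVX2Inline, AVXInline or PureGo from CPU features,
	// following the updated New()/Sum() dispatch above.
	auto := tz.Sum(data)

	// Explicit choice of the new code path, e.g. for benchmarking
	// (requires an AVX-capable CPU).
	d := tz.NewWith(tz.AVXInline)
	_, _ = d.Write(data)
	manual := d.Sum(nil)

	// All implementations compute the same function, so the digests match.
	fmt.Println(bytes.Equal(auto[:], manual))
}
```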


@@ -13,6 +13,7 @@ const benchDataSize = 100000
 
 var providers = []Implementation{
 	AVX,
+	AVXInline,
 	AVX2,
 	AVX2Inline,
 	PureGo,
@@ -22,6 +23,9 @@ func TestNewWith(t *testing.T) {
 	d := NewWith(AVX)
 	require.IsType(t, (*digest)(nil), d)
 
+	d = NewWith(AVXInline)
+	require.IsType(t, (*digest4)(nil), d)
+
 	d = NewWith(AVX2)
 	require.IsType(t, (*digest2)(nil), d)
 
@@ -53,7 +57,7 @@ var testCases = []struct {
 func TestHash(t *testing.T) {
 	for i := range providers {
 		p := providers[i]
-		t.Run("test "+p.String()+" digest", func(t *testing.T) {
+		t.Run(p.String()+" digest", func(t *testing.T) {
 			d := NewWith(p)
 			for _, tc := range testCases {
 				d.Reset()
@@ -82,7 +86,7 @@ func BenchmarkSum(b *testing.B) {
 	for i := range providers {
 		p := providers[i]
-		b.Run("bench"+p.String()+"digest", func(b *testing.B) {
+		b.Run(p.String()+" digest", func(b *testing.B) {
 			b.ResetTimer()
 			b.ReportAllocs()
 			d := NewWith(p)