forked from TrueCloudLab/tzhash
Optimize AVX implementation
1. Do the same mask trick as with AVX2.
2. Get rid of load, generate constant on the fly.

```
name                     old time/op    new time/op    delta
Sum/AVXInline_digest-8   2.26ms ± 4%    2.17ms ± 5%    -4.05%  (p=0.000 n=19+17)

name                     old speed      new speed      delta
Sum/AVXInline_digest-8   44.3MB/s ± 4%  46.2MB/s ± 5%  +4.25%  (p=0.000 n=19+17)
```

Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>
parent d4cb61e470
commit 921f8b0579

1 changed file with 22 additions and 13 deletions
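The "mask trick" from point 1 expands one bit of the input byte into a full per-word mask using only vector instructions, replacing the old scalar shift/negate/broadcast sequence. Below is a minimal scalar sketch of what the rewritten mask macro computes in each 16-bit lane, assuming (as set up in the hunks further down) that every word of X10 holds the input byte, X12 holds 0x0001 in every word and X13 is zero; the package and function names are illustrative.

```go
package sketch

// maskWord models one 16-bit lane of the new mask(bit, tmp, to) macro.
func maskWord(b byte, bit uint) uint16 {
	w := uint16(b) >> bit // VPSRLW bit, X10, tmp
	w &= 0x0001           // VPAND  X12, tmp, to  -> 0 or 1 in every word
	return 0 - w          // VPSUBW to, X13, to   -> 0xFFFF or 0x0000
}
```

The resulting 0xFFFF/0x0000 words are then consumed by the unchanged VANDPD steps, just as the old broadcast mask was.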
```
@@ -6,22 +6,15 @@
     VPSLLQ $1, FROM, TO \
     VPALIGNR $8, TO, FROM, R2 \
     VPSRLQ $63, R2, R2 \
-    VMOVDQU ·x127x63(SB), R3 \
-    VANDPD TO, R3, R3 \
+    VANDPD TO, X14, R3 \
     VPUNPCKHQDQ R3, R3, R3 \
     VXORPD R2, TO, TO \
     VXORPD R3, TO, TO

-#define mask(bit, src, tmp, to1, to2) \
-    MOVQ src, tmp \
-    SHRQ bit, tmp \
-    ANDQ $1, tmp \
-    NEGQ tmp \
-    MOVQ tmp, to1 \
-    VSHUFPS $0, to1, to1, to2
-    // VPBROADCASTB to1, to2
-    // Can't use VPBROADCASTB because it is AVX2 instruction
-    //https://software.intel.com/en-us/forums/intel-isa-extensions/topic/301461
+#define mask(bit, tmp, to) \
+    VPSRLW bit, X10, tmp \
+    VPAND X12, tmp, to \ // to = 0x000<bit>000<bit>...
+    VPSUBW to, X13, to // to = 0xFFFF.. or 0x0000 depending on bit

 #define mulBit(bit) \
     VMOVDQU X0, X8 \
```
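The only change inside mul2 is where the high-bit mask comes from: the per-call VMOVDQU of ·x127x63(SB) is gone and the same value is read from X14, which the callers now prepare once (see the mulBitRight/mulByteRight hunks below). A scalar sketch of that mask per 64-bit lane; the value is inferred from the constant's name and from the VPSLLQ $63 that now produces it, and the names are illustrative.

```go
package sketch

// x127x63Lane is the per-lane value the removed ·x127x63(SB) constant is
// assumed to have held: bit 63 of each 64-bit lane, i.e. bits 127 and 63
// of the full 128-bit register.
const x127x63Lane = uint64(1) << 63

// topBit models the VANDPD TO, X14, R3 step in mul2.
func topBit(lane uint64) uint64 {
	return lane & x127x63Lane
}
```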
```
@@ -30,7 +23,7 @@
     VXORPD X1, X5, X0 \
     mul2(X2, X5, X6, X7) \
     VXORPD X3, X5, X2 \
-    mask(bit, CX, DX, X6, X5) \
+    mask(bit, X6, X5) \
     VANDPD X0, X5, X1 \
     VXORPD X8, X1, X1 \
     VANDPD X2, X5, X3 \
```
```
@@ -49,6 +42,11 @@ TEXT ·mulBitRight(SB),NOSPLIT,$0
     MOVQ c11+24(FP), DX
     VMOVDQU (DX), X3

+    VPXOR X13, X13, X13 // Y13 = 0x0000...
+    VPCMPEQB X14, X14, X14 // Y14 = 0xFFFF...
+    VPSUBQ X14, X13, X13
+    VPSLLQ $63, X13, X14
+
     mul2(X0, X5, X6, X7) // c00 *= 2
     VXORPD X5, X1, X0 // c00 += c01
     mul2(X2, X5, X6, X7) // c10 *= 2
```
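Point 2 of the commit message is the four instructions added above: rather than loading a constant from the data section on every call, mulBitRight derives it in registers. A scalar model of one 64-bit lane (function name is illustrative):

```go
package sketch

// highBitOnTheFly mirrors the added VPXOR/VPCMPEQB/VPSUBQ/VPSLLQ sequence
// for a single 64-bit lane.
func highBitOnTheFly() uint64 {
	var zero uint64    // VPXOR    X13, X13, X13 -> 0x0000...
	ones := ^uint64(0) // VPCMPEQB X14, X14, X14 -> 0xFFFF...
	one := zero - ones // VPSUBQ   X14, X13, X13 -> 0 - (-1) = 1 in every lane
	return one << 63   // VPSLLQ   $63, X13, X14 -> high bit of every lane
}
```

This is the memory load the commit message refers to getting rid of.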
```
@@ -77,8 +75,19 @@ TEXT ·mulByteRight(SB),NOSPLIT,$0
     VMOVDQU (CX), X2
     MOVQ c11+24(FP), DX
     VMOVDQU (DX), X3
+    MOVQ $0, CX
     MOVB b+32(FP), CX

+    VPXOR X13, X13, X13 // X13 = 0x0000...
+    VPCMPEQB X14, X14, X14 // X14 = 0xFFFF...
+    VPSUBQ X14, X13, X10
+    VPSUBW X14, X13, X12 // X12 = 0x00010001... (packed words of 1)
+    VPSLLQ $63, X10, X14 // X14 = 0x10000000... (packed quad-words with HSB set)
+
+    MOVQ CX, X10
+    VPSHUFLW $0, X10, X11
+    VPSHUFD $0, X11, X10
+
     mulBit($7)
     mulBit($6)
     mulBit($5)
```
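mulByteRight additionally needs the input byte replicated into every 16-bit word of X10 so that mask(bit, ...) can test the same bit in all lanes at once; the added MOVQ $0, CX clears the upper bits of CX before MOVB loads the byte into it. A scalar sketch of the MOVQ CX, X10 / VPSHUFLW / VPSHUFD broadcast (names are illustrative):

```go
package sketch

// broadcastByte models the word-broadcast added to mulByteRight: after the
// two shuffles every 16-bit word of the register holds the input byte.
func broadcastByte(b byte) [8]uint16 {
	var x10 [8]uint16
	x10[0] = uint16(b)       // MOVQ CX, X10: byte lands in word 0
	for i := 1; i < 4; i++ { // VPSHUFLW $0, X10, X11: word 0 -> words 0..3
		x10[i] = x10[0]
	}
	for i := 4; i < 8; i++ { // VPSHUFD $0, X11, X10: dword 0 -> all dwords
		x10[i] = x10[i-4]
	}
	return x10
}
```

The mulBit($7), mulBit($6), … calls that follow then each pick one bit out of this broadcast value through the mask macro.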