tz: optimize AVX2 implementation

1. Perform masking with 2 instructions instead of 3 (use an arithmetic
   shift); see the sketch after this list.
2. Broadcast the data byte in one instruction at the start of byte
   processing.
3. Reorder instructions to reduce data hazards and resource contention.
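
For illustration, here is a scalar Go model of points 1 and 2 (a sketch written for this description, not code from the diff; the helper names are mine): both functions turn bit k of byte b into a 0x0000/0xFFFF word mask. maskOld mirrors the removed VPSRLW/VPAND/VPSUBW sequence over a zero-extended byte; maskNew mirrors the new VPSLLW/VPSRAW pair over a plain byte broadcast, which is why the shift immediates in mulBit change from $7..$0 to $8..$15.

```go
package main

import "fmt"

// maskOld: zero-extend b to a word (VPMOVZXBW), shift bit k down (VPSRLW),
// isolate it (VPAND with 0x0001 words), and subtract from zero (VPSUBW) --
// three data instructions per mask.
func maskOld(b byte, k uint) uint16 {
	w := uint16(b)    // word = 0x00bb
	t := (w >> k) & 1 // 0x0000 or 0x0001
	return 0 - t      // 0x0000 or 0xFFFF
}

// maskNew: broadcast b into both bytes of the word (VPBROADCASTB), shift
// bit k into the sign position (VPSLLW by 15-k, i.e. $8..$15), then smear
// it with an arithmetic shift (VPSRAW $15) -- two data instructions.
func maskNew(b byte, k uint) uint16 {
	w := uint16(b) * 0x0101               // word = 0xbbbb
	return uint16(int16(w<<(15-k)) >> 15) // 0x0000 or 0xFFFF
}

func main() {
	for b := 0; b < 256; b++ {
		for k := uint(0); k < 8; k++ {
			if maskOld(byte(b), k) != maskNew(byte(b), k) {
				fmt.Printf("mismatch: b=%#x k=%d\n", b, k)
			}
		}
	}
	fmt.Println("masks agree for all bytes and bits")
}
```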

```
name               old time/op    new time/op    delta
Sum/AVX2_digest-8    1.39ms ± 0%    1.22ms ± 0%  -12.18%  (p=0.000 n=9+7)

name               old speed      new speed      delta
Sum/AVX2_digest-8  71.7MB/s ± 0%  81.7MB/s ± 0%  +13.87%  (p=0.000 n=9+7)
```
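
The comparison above is in benchstat format. A benchmark of roughly the following shape would produce it; the ~100 KB input size is inferred from 1.39 ms/op at 71.7 MB/s, and tz.New is assumed from the package's hash.Hash interface, so treat this as a sketch rather than the repository's actual benchmark:

```go
package tz_test

import (
	"testing"

	"github.com/nspcc-dev/tzhash/tz"
)

func BenchmarkSum(b *testing.B) {
	data := make([]byte, 100000) // size inferred from time/op vs. speed

	b.Run("AVX2 digest", func(b *testing.B) { // reported as Sum/AVX2_digest-8
		b.SetBytes(int64(len(data)))
		d := tz.New() // assumed constructor; backend selection is CPU-dependent
		for i := 0; i < b.N; i++ {
			d.Reset()
			d.Write(data)
			d.Sum(nil)
		}
	})
}
```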

Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>
Author: Evgenii Stratonikov
Date: 2022-03-10 22:44:16 +03:00
Committed by: Alex Vanin
parent defa61ce8f
commit 3de3046074


```diff
@@ -1,61 +1,56 @@
 #include "textflag.h"

-#define mask(bit, tmp, to) \
-	VPSRLW bit, Y10, tmp \
-	VPAND Y12, tmp, to \ // to = 0x000<bit>000<bit>...
-	VPSUBW to, Y13, to // to = 0xFFFF.. or 0x0000 depending on bit
-
 #define mulBit(bit, in_1, in_2, out_1, out_2) \
+	VPSLLW bit, Y10, Y11 \
 	VPSLLQ $1, in_1, Y1 \
+	VPSRAW $15, Y11, Y12 \
 	VPALIGNR $8, Y1, in_1, Y2 \
-	VPSRLQ $63, Y2, Y2 \
-	VPXOR Y1, Y2, Y2 \
 	VPAND Y1, Y14, Y3 \
+	VPSRLQ $63, Y2, Y2 \
 	VPUNPCKHQDQ Y3, Y3, Y3 \
-	VPXOR Y2, Y3, Y3 \
-	mask(bit, Y11, Y2) \
+	VPXOR Y1, Y2, Y7 \
 	VPXOR Y3, in_2, out_1 \
-	VPAND out_1, Y2, Y4 \
+	VPXOR Y7, out_1, out_1 \
+	VPAND out_1, Y12, Y4 \
 	VPXOR Y4, in_1, out_2 \

 // func mulByteSliceRightx2(c00c10, c01c11 *[4]uint64, n int, data *byte)
 TEXT ·mulByteSliceRightx2(SB), NOSPLIT, $0
 	MOVQ c00c10+0(FP), AX
-	VMOVDQU (AX), Y0
 	MOVQ c01c11+8(FP), BX
-	VMOVDQU (BX), Y8

 	VPXOR Y13, Y13, Y13    // Y13 = 0x0000...
 	VPCMPEQB Y14, Y14, Y14 // Y14 = 0xFFFF...
 	VPSUBQ Y14, Y13, Y10
-	VPSUBW Y14, Y13, Y12 // Y12 = 0x00010001... (packed words of 1)
 	VPSLLQ $63, Y10, Y14 // Y14 = 0x10000000... (packed quad-words with HSB set)

 	MOVQ n+16(FP), CX
 	MOVQ data+24(FP), DX

+	VMOVDQU (AX), Y0
+	VMOVDQU (BX), Y8
+
 loop:
 	CMPQ CX, $0
 	JEQ finish
-	SUBQ $1, CX
-	VPBROADCASTB (DX), X10 // X10 = packed bytes of b.
-	VPMOVZXBW X10, Y10     // Extend with zeroes to packed words.
+	VPBROADCASTB (DX), Y10
 	ADDQ $1, DX
+	SUBQ $1, CX

-	mulBit($7, Y0, Y8, Y5, Y6)
-	mulBit($6, Y5, Y6, Y0, Y8)
-	mulBit($5, Y0, Y8, Y5, Y6)
-	mulBit($4, Y5, Y6, Y0, Y8)
-	mulBit($3, Y0, Y8, Y5, Y6)
-	mulBit($2, Y5, Y6, Y0, Y8)
-	mulBit($1, Y0, Y8, Y5, Y6)
-	mulBit($0, Y5, Y6, Y0, Y8)
+	mulBit($8, Y0, Y8, Y5, Y6)
+	mulBit($9, Y5, Y6, Y0, Y8)
+	mulBit($10, Y0, Y8, Y5, Y6)
+	mulBit($11, Y5, Y6, Y0, Y8)
+	mulBit($12, Y0, Y8, Y5, Y6)
+	mulBit($13, Y5, Y6, Y0, Y8)
+	mulBit($14, Y0, Y8, Y5, Y6)
+	mulBit($15, Y5, Y6, Y0, Y8)
 	JMP loop

 finish:
-	VMOVDQU Y8, (BX)
 	VMOVDQU Y0, (AX)
+	VMOVDQU Y8, (BX)

 	RET
```
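
On the Go side, this routine pairs with a declaration matching the comment in the file. A minimal sketch of that pairing, assuming the usual //go:noescape annotation and a hypothetical wrapper name (neither is taken from the diff):

```go
package tz

// Implemented in the AVX2 assembly above.
//
//go:noescape
func mulByteSliceRightx2(c00c10, c01c11 *[4]uint64, n int, data *byte)

// mulMatrixRight is a hypothetical call site: it runs every byte of data
// through the assembly loop, updating both register-sized halves of the
// 2x2 matrix state in place.
func mulMatrixRight(c00c10, c01c11 *[4]uint64, data []byte) {
	if len(data) == 0 {
		return
	}
	mulByteSliceRightx2(c00c10, c01c11, len(data), &data[0])
}
```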