diff --git a/tz/digest_avx2_amd64.s b/tz/digest_avx2_amd64.s
index 2d818de..5836344 100644
--- a/tz/digest_avx2_amd64.s
+++ b/tz/digest_avx2_amd64.s
@@ -1,61 +1,56 @@
 #include "textflag.h"
 
-#define mask(bit, tmp, to) \
-	VPSRLW bit, Y10, tmp \
-	VPAND  Y12, tmp, to  \ // to = 0x000000...
-	VPSUBW to, Y13, to   // to = 0xFFFF.. or 0x0000 depending on bit
-
 #define mulBit(bit, in_1, in_2, out_1, out_2) \
+	VPSLLW      bit, Y10, Y11      \
 	VPSLLQ      $1, in_1, Y1       \
+	VPSRAW      $15, Y11, Y12      \
 	VPALIGNR    $8, Y1, in_1, Y2   \
-	VPSRLQ      $63, Y2, Y2        \
-	VPXOR       Y1, Y2, Y2         \
 	VPAND       Y1, Y14, Y3        \
+	VPSRLQ      $63, Y2, Y2        \
 	VPUNPCKHQDQ Y3, Y3, Y3         \
-	VPXOR       Y2, Y3, Y3         \
-	mask(bit, Y11, Y2)             \
+	VPXOR       Y1, Y2, Y7         \
 	VPXOR       Y3, in_2, out_1    \
-	VPAND       out_1, Y2, Y4      \
+	VPXOR       Y7, out_1, out_1   \
+	VPAND       out_1, Y12, Y4     \
 	VPXOR       Y4, in_1, out_2    \
 
 // func mulByteSliceRightx2(c00c10, c01c11 *[4]uint64, n int, data *byte)
 TEXT ·mulByteSliceRightx2(SB), NOSPLIT, $0
-	MOVQ c00c10+0(FP), AX
-	VMOVDQU (AX), Y0
-	MOVQ c01c11+8(FP), BX
-	VMOVDQU (BX), Y8
+	MOVQ c00c10+0(FP), AX
+	MOVQ c01c11+8(FP), BX
 
 	VPXOR    Y13, Y13, Y13 // Y13 = 0x0000...
 	VPCMPEQB Y14, Y14, Y14 // Y14 = 0xFFFF...
 	VPSUBQ   Y14, Y13, Y10
-	VPSUBW   Y14, Y13, Y12 // Y12 = 0x00010001... (packed words of 1)
 	VPSLLQ   $63, Y10, Y14 // Y14 = 0x10000000... (packed quad-words with HSB set)
 
 	MOVQ n+16(FP), CX
 	MOVQ data+24(FP), DX
 
+	VMOVDQU (AX), Y0
+	VMOVDQU (BX), Y8
+
 loop:
 	CMPQ CX, $0
 	JEQ  finish
-	SUBQ $1, CX
-	VPBROADCASTB (DX), X10 // X10 = packed bytes of b.
-	VPMOVZXBW    X10, Y10  // Extend with zeroes to packed words.
+	VPBROADCASTB (DX), Y10
 	ADDQ $1, DX
+	SUBQ $1, CX
 
-	mulBit($7, Y0, Y8, Y5, Y6)
-	mulBit($6, Y5, Y6, Y0, Y8)
-	mulBit($5, Y0, Y8, Y5, Y6)
-	mulBit($4, Y5, Y6, Y0, Y8)
-	mulBit($3, Y0, Y8, Y5, Y6)
-	mulBit($2, Y5, Y6, Y0, Y8)
-	mulBit($1, Y0, Y8, Y5, Y6)
-	mulBit($0, Y5, Y6, Y0, Y8)
+	mulBit($8, Y0, Y8, Y5, Y6)
+	mulBit($9, Y5, Y6, Y0, Y8)
+	mulBit($10, Y0, Y8, Y5, Y6)
+	mulBit($11, Y5, Y6, Y0, Y8)
+	mulBit($12, Y0, Y8, Y5, Y6)
+	mulBit($13, Y5, Y6, Y0, Y8)
+	mulBit($14, Y0, Y8, Y5, Y6)
+	mulBit($15, Y5, Y6, Y0, Y8)
 
 	JMP loop
 
 finish:
-	VMOVDQU Y8, (BX)
 	VMOVDQU Y0, (AX)
+	VMOVDQU Y8, (BX)
 
 	RET
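
For reference, a rough scalar sketch of what one mulBit expansion computes after this change, assuming the YMM pairs hold GF(2^127) elements reduced modulo x^127 + x^63 + 1 (which is what the shift-and-fold steps in the macro suggest). The names gf127, mulX, wordMask and mulBitScalar are illustrative only, not part of the package. wordMask shows why the shift counts are now $8..$15 instead of $7..$0: the data byte is broadcast into every byte of Y10, so each 16-bit word holds b<<8|b, and VPSLLW by 8+k followed by VPSRAW by 15 leaves 0xffff in the word exactly when bit 7-k of b is set, replacing the old VPSRLW/VPAND/VPSUBW mask macro.

	// Illustrative sketch only -- not part of the tz package.
	package main

	import "fmt"

	// gf127 is a polynomial over GF(2) of degree < 127, reduced modulo
	// p(x) = x^127 + x^63 + 1: lo holds coefficients 0..63, hi holds 64..126.
	type gf127 struct{ lo, hi uint64 }

	// mulX returns c*x mod p: shift left by one, carry bit 63 of lo into hi,
	// and fold the x^127 term back in as x^63 + 1.
	func mulX(c gf127) gf127 {
		carry := c.lo >> 63     // bit 63 of lo moves into bit 0 of hi
		top := (c.hi >> 62) & 1 // coefficient of x^126 becomes x^127 after the shift
		return gf127{
			lo: c.lo<<1 ^ top ^ top<<63,        // x^127 = x^63 + 1 (mod p)
			hi: (c.hi<<1 ^ carry) &^ (1 << 63), // keep the element below x^127
		}
	}

	// wordMask mirrors VPSLLW $s + VPSRAW $15 on a word holding the broadcast
	// byte b: the result is 0xffff iff bit (15-s) of b is set, for s in 8..15.
	func wordMask(b byte, s uint) uint16 {
		w := uint16(b)<<8 | uint16(b)    // VPBROADCASTB: b replicated in the word
		return uint16(int16(w<<s) >> 15) // arithmetic shift spreads the chosen bit
	}

	// mulBitScalar mirrors one mulBit expansion:
	// out1 = in2 + in1*x, out2 = in1 + mask&out1.
	func mulBitScalar(in1, in2 gf127, mask uint64) (out1, out2 gf127) {
		t := mulX(in1)
		out1 = gf127{lo: t.lo ^ in2.lo, hi: t.hi ^ in2.hi}
		out2 = gf127{lo: in1.lo ^ mask&out1.lo, hi: in1.hi ^ mask&out1.hi}
		return
	}

	func main() {
		// Process bit 7 of a data byte (shift count $8 in the macro).
		b := byte(0x80)
		mask := uint64(int64(int16(wordMask(b, 8)))) // widen 0xffff/0x0000 to 64 bits
		c0, c1 := gf127{lo: 1}, gf127{lo: 2}
		n0, n1 := mulBitScalar(c0, c1, mask)
		fmt.Printf("%#x %#x / %#x %#x\n", n0.lo, n0.hi, n1.lo, n1.hi)
	}

The hoisting of VMOVDQU (AX)/(BX) below the constant setup and the SUBQ/ADDQ reordering in the loop do not change the result; they only move loads and counter updates away from the instructions that consume them.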