+ cmp $_end,$inp
+
+ mov $A,$SZ*0($ctx)
+ mov $B,$SZ*1($ctx)
+ mov $C,$SZ*2($ctx)
+ mov $D,$SZ*3($ctx)
+ mov $E,$SZ*4($ctx)
+ mov $F,$SZ*5($ctx)
+ mov $G,$SZ*6($ctx)
+ mov $H,$SZ*7($ctx)
+ jb .Lloop_ssse3
+
+ mov $_rsp,%rsi
+___
+$code.=<<___ if ($win64);
+ movaps 16*$SZ+32(%rsp),%xmm6
+ movaps 16*$SZ+48(%rsp),%xmm7
+ movaps 16*$SZ+64(%rsp),%xmm8
+ movaps 16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lepilogue_ssse3:
+ ret
+.size ${func}_ssse3,.-${func}_ssse3
+___
+}
+
+if ($avx) {{
+######################################################################
+# XOP code path
+#
+# AMD XOP supplies vector rotates (vprot*), which implement the SHA
+# sigma rotations in one instruction each.
+if ($SZ==8) {	# SHA512 only
+$code.=<<___;
+.type	${func}_xop,\@function,3
+.align	64
+${func}_xop:
+.Lxop_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arh
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+# Win64 ABI: xmm6+ are callee-saved, so spill the ones this path uses.
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+# SHA-512 ($SZ>4) additionally clobbers xmm10/xmm11.
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_xop:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+	jmp	.Lloop_xop
+___
+# Per-block loop header: load and byte-swap the 16 message words, add
+# the first 16 K constants, and park W+K on the stack for the rounds.
+	if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3		# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
+___
+# Emit the SHA-256 message-schedule update for quadruple-round $j using
+# XOP vprotd for the sigma rotations, storing W[t]+K[t] for the next 16
+# rounds at 16*$j(%rsp).  The 104 scalar round instructions produced by
+# &$body are interleaved between the SIMD ops so integer and vector
+# pipes overlap; the interleaving order is deliberate — do not reorder.
+# Args: $j (0..3), $body (round-body generator, e.g. \&body_00_15),
+# @X (four xmm registers holding the current 16 message words).
+sub XOP_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrld		($t0,$t0,$sigma0[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t3,@X[3],8*$SZ-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrld		($t2,@X[3],$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq	($t3,$t3,8);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t3,@X[0],8*$SZ-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrld		($t2,@X[0],$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpslldq	($t3,$t3,8);		# 22 instructions
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+ for ($i=0,$j=0; $j<4; $j++) {
+ &XOP_256_00_47($j,\&body_00_15,@X);
+ push(@X,shift(@X)); # rotate(@X)
+ }
+ &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
+ &jne (".Lxop_00_47");
+
+ for ($i=0; $i<16; ) {
+ foreach(body_00_15()) { eval; }
+ }
+
+ } else { # SHA512
+ my @X = map("%xmm$_",(0..7));
+ my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+.align 16
+.Lloop_xop:
+ vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
+ vmovdqu 0x00($inp),@X[0]
+ lea $TABLE+0x80(%rip),$Tbl # size optimization
+ vmovdqu 0x10($inp),@X[1]
+ vmovdqu 0x20($inp),@X[2]
+ vpshufb $t3,@X[0],@X[0]
+ vmovdqu 0x30($inp),@X[3]
+ vpshufb $t3,@X[1],@X[1]
+ vmovdqu 0x40($inp),@X[4]
+ vpshufb $t3,@X[2],@X[2]
+ vmovdqu 0x50($inp),@X[5]
+ vpshufb $t3,@X[3],@X[3]
+ vmovdqu 0x60($inp),@X[6]
+ vpshufb $t3,@X[4],@X[4]
+ vmovdqu 0x70($inp),@X[7]
+ vpshufb $t3,@X[5],@X[5]
+ vpaddq -0x80($Tbl),@X[0],$t0
+ vpshufb $t3,@X[6],@X[6]
+ vpaddq -0x60($Tbl),@X[1],$t1
+ vpshufb $t3,@X[7],@X[7]
+ vpaddq -0x40($Tbl),@X[2],$t2
+ vpaddq -0x20($Tbl),@X[3],$t3
+ vmovdqa $t0,0x00(%rsp)
+ vpaddq 0x00($Tbl),@X[4],$t0
+ vmovdqa $t1,0x10(%rsp)
+ vpaddq 0x20($Tbl),@X[5],$t1
+ vmovdqa $t2,0x20(%rsp)
+ vpaddq 0x40($Tbl),@X[6],$t2
+ vmovdqa $t3,0x30(%rsp)
+ vpaddq 0x60($Tbl),@X[7],$t3
+ vmovdqa $t0,0x40(%rsp)
+ mov $A,$a1
+ vmovdqa $t1,0x50(%rsp)
+ mov $B,$a3
+ vmovdqa $t2,0x60(%rsp)
+ xor $C,$a3 # magic
+ vmovdqa $t3,0x70(%rsp)
+ mov $E,$a0
+ jmp .Lxop_00_47
+
+.align 16
+.Lxop_00_47:
+ add \$`16*2*$SZ`,$Tbl
+___
+# Emit the SHA-512 message-schedule update for double-round $j using XOP
+# vprotq for the 64-bit sigma rotations, storing W[t]+K[t] at
+# 16*$j(%rsp).  The 52 scalar round instructions from &$body are
+# interleaved between the SIMD ops; the order is deliberate.
+# Args: $j (0..7), $body (round-body generator), @X (eight xmm
+# registers holding the current 16 message words).
+sub XOP_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotq		($t1,$t0,8*$SZ-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrlq		($t0,$t0,$sigma0[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += X[9..10]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotq		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotq		($t3,@X[7],8*$SZ-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..2])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrlq		($t2,@X[7],$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t0);	# X[0..1] += sigma0(X[1..2])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vprotq		($t1,$t3,$sigma1[1]-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+# Eight double-rounds of schedule update, then the sentinel-byte check
+# (see the SHA-256 variant above) and the final 16 plain rounds.
+    for ($i=0,$j=0; $j<8; $j++) {
+	&XOP_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_xop:
+	ret
+.size	${func}_xop,.-${func}_xop
+___
+}
+######################################################################
+# AVX+shrd code path
+#
+# Within this scope, &ror emits shrd (double-precision shift) instead,
+# since both source operands are the same register in the callers here.
+# NOTE(review): @_[0] is a one-element slice of @_ (perl would warn
+# "better written as $_[0]"); it flattens into the argument list, so
+# behavior is the same.
+local *ror = sub { &shrd(@_[0],@_) };
+
+$code.=<<___;
+.type	${func}_avx,\@function,3
+.align	64
+${func}_avx:
+.Lavx_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arh
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+# Win64 ABI: preserve callee-saved xmm registers used by this path.
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+# SHA-256/AVX: $t4/$t5 are vpshufb masks loaded once per call (they sit
+# right after the K table in $TABLE), used by Xupdate_256_AVX's sigma1.
+	if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3		# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
+___
+# Return the 29 quoted perlasm statements that advance the SHA-256
+# message schedule by four words on plain AVX (no XOP rotates): each
+# sigma rotation is built from shift/shift/xor, and sigma1 of the upper
+# words is massaged with vpshufb masks $t4/$t5.  The strings are
+# evaluated one at a time by the caller so scalar round code can be
+# interleaved between them; statement order is load-bearing.
+sub Xupdate_256_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
+	'&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
+	'&vpsrld	($t2,$t0,$sigma0[0]);',
+	'&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,$sigma0[2])',
+	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
+	'&vpxor		($t0,$t3,$t2)',
+	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t1)',
+	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t2)',
+	'&vpsrld	($t2,$t3,$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..4])
+	'&vpsrlq	($t3,$t3,$sigma1[0]);',
+	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
+	'&vpxor		($t2,$t2,$t3);',
+	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	'&vpxor		($t2,$t2,$t3)',
+	'&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
+	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
+	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	'&vpsrld	($t2,$t3,$sigma1[2])',
+	'&vpsrlq	($t3,$t3,$sigma1[0])',
+	'&vpxor		($t2,$t2,$t3);',
+	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	'&vpxor		($t2,$t2,$t3)',
+	'&vpshufb	($t2,$t2,$t5)',
+	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+# Emit one SHA-256/AVX quadruple-round: evaluate the Xupdate statements
+# interleaved 1:3 with the 104 scalar round instructions from &$body,
+# then add K and park the result at 16*$j(%rsp) for the next rounds.
+# Args: $j (0..3), $body (round-body generator), @X (four xmm regs).
+sub AVX_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+# Four quadruple-rounds, sentinel-byte loop test, then the final 16
+# plain rounds (same structure as the XOP path above).
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					} else {	# SHA512
+    my @X = map("%xmm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vpshufb	$t3,@X[0],@X[0]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[1],@X[1]
+	vmovdqu	0x40($inp),@X[4]
+	vpshufb	$t3,@X[2],@X[2]
+	vmovdqu	0x50($inp),@X[5]
+	vpshufb	$t3,@X[3],@X[3]
+	vmovdqu	0x60($inp),@X[6]
+	vpshufb	$t3,@X[4],@X[4]
+	vmovdqu	0x70($inp),@X[7]
+	vpshufb	$t3,@X[5],@X[5]
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t3,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x10(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x20(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x30(%rsp)
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x40(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x50(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x60(%rsp)
+	xor	$C,$a3		# magic
+	vmovdqa	$t3,0x70(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	add	\$`16*2*$SZ`,$Tbl
+___
+# Return the 23 quoted perlasm statements that advance the SHA-512
+# message schedule by two words on plain AVX: 64-bit rotates are built
+# from vpsllq/vpsrlq/vpxor.  Evaluated one at a time by the caller so
+# scalar round code can be interleaved; statement order is load-bearing.
+sub Xupdate_512_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
+	'&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
+	'&vpsrlq	($t2,$t0,$sigma0[0])',
+	'&vpaddq	(@X[0],@X[0],$t3);',	# X[0..1] += X[9..10]
+	'&vpsrlq	($t3,$t0,$sigma0[2])',
+	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
+	'&vpxor		($t0,$t3,$t2)',
+	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t1)',
+	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t2)',
+	'&vpsrlq	($t3,@X[7],$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..2])
+	'&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1]);',
+	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
+	'&vpsrlq	($t1,@X[7],$sigma1[0]);',
+	'&vpxor		($t3,$t3,$t2)',
+	'&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
+	'&vpxor		($t3,$t3,$t1)',
+	'&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
+	'&vpxor		($t3,$t3,$t2)',
+	'&vpxor		($t3,$t3,$t1)',		# sigma1(X[14..15])
+	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
+	);
+}
+
+# Emit one SHA-512/AVX double-round: Xupdate statements interleaved 1:2
+# with the 52 scalar round instructions, then add K and store W+K at
+# 16*$j(%rsp).  Args: $j (0..7), $body, @X (eight xmm regs).
+sub AVX_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+# Eight double-rounds, sentinel-byte loop test, final 16 plain rounds.
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_avx
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx:
+	ret
+.size	${func}_avx,.-${func}_avx
+___
+
+if ($avx>1) {{
+######################################################################
+# AVX2+BMI code path
+#
+my $a5=$SZ==4?"%esi":"%rsi";	# zap $inp
+my $PUSH8=8*2*$SZ;		# stack shift per quadruple/double round
+use integer;
+# Return the quoted statements for one SHA round on the AVX2+BMI2 path,
+# using rorx (non-flag rotate) and andn for Ch.  Sigma0(a) of a round
+# is folded into the NEXT round's h ("from the past") to shorten the
+# critical path; @ROT is rotated and $i bumped by the trailing
+# statement, so successive calls generate successive rounds.
+sub bodyx_00_15 () {
+	# at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',    # h+=X[i]+K[i]
+	'&and	($a4,$e)',		# f&e
+	'&rorx	($a0,$e,$Sigma1[2])',
+	'&rorx	($a2,$e,$Sigma1[1])',
+
+	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
+	'&lea	($h,"($h,$a4)")',
+	'&andn	($a4,$e,$g)',		# ~e&g
+	'&xor	($a0,$a2)',
+
+	'&rorx	($a1,$e,$Sigma1[0])',
+	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
+	'&xor	($a0,$a1)',		# Sigma1(e)
+	'&mov	($a2,$a)',
+
+	'&rorx	($a4,$a,$Sigma0[2])',
+	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
+	'&xor	($a2,$b)',		# a^b, b^c in next round
+	'&rorx	($a1,$a,$Sigma0[1])',
+
+	'&rorx	($a0,$a,$Sigma0[0])',
+	'&lea	($d,"($d,$h)")',	# d+=h
+	'&and	($a3,$a2)',		# (b^c)&(a^b)
+	'&xor	($a1,$a4)',
+
+	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
+	'&xor	($a1,$a0)',		# Sigma0(a)
+	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
+	'&mov	($a4,$e)',		# copy of f in future
+
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+	# and at the finish one has to $a+=$a1
+}
+
+$code.=<<___;
+.type	${func}_avx2,\@function,3
+.align	64
+${func}_avx2:
+.Lavx2_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	sub	\$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
+	shl	\$4,%rdx		# num*16
+	and	\$-256*$SZ,%rsp		# align stack frame
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	add	\$`2*$SZ*($rounds-8)`,%rsp
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arh
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+# Win64 ABI: preserve callee-saved xmm registers used by this path.
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx2:
+
+	vzeroupper
+	sub	\$-16*$SZ,$inp		# inp++, size optimization
+	mov	$SZ*0($ctx),$A
+	mov	$inp,%r12		# borrow $T1
+	mov	$SZ*1($ctx),$B
+	cmp	%rdx,$inp		# $_end
+	mov	$SZ*2($ctx),$C
+	cmove	%rsp,%r12		# next block or random data
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+# AVX2 processes two blocks at once: the low xmm lanes get this block,
+# vinserti128 pulls the next block (via %r12) into the high lanes.
+	if ($SZ==4) {	# SHA256
+    my @X = map("%ymm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	-16*$SZ+0($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	#mov	$inp,$_inp	# offload $inp
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	vpshufb	$t3,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	vpshufb	$t3,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	mov	$B,$a3
+	vmovdqa	$t2,0x00(%rsp)
+	xor	$C,$a3		# magic
+	vmovdqa	$t3,0x20(%rsp)
+	mov	$F,$a4
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+# Emit one SHA-256/AVX2 quadruple-round on ymm registers.  Every other
+# call shifts %rsp down by $PUSH8 so W+K for new rounds is stored below
+# the frame while old entries stay addressable via $base offsets.
+# Args: $j (0..3), $body (\&bodyx_00_15), @X (four ymm regs).
+sub AVX2_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%2)==0);
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+# Four quadruple-rounds (with @X rotation), advance $Tbl past the K
+# entries consumed, test the sentinel byte, then the final 16 rounds.
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX2_256_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+					} else {	# SHA512
+    my @X = map("%ymm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqu	-16*$SZ($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	vmovdqu	-16*$SZ+64($inp),%xmm4
+	vmovdqu	-16*$SZ+80($inp),%xmm5
+	vmovdqu	-16*$SZ+96($inp),%xmm6
+	vmovdqu	-16*$SZ+112($inp),%xmm7
+	#mov	$inp,$_inp	# offload $inp
+	vmovdqa	`$SZ*2*$rounds-0x80`($Tbl),$t2
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	vpshufb	$t2,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	vpshufb	$t2,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+	vpshufb	$t2,@X[2],@X[2]
+	vinserti128	\$1,64(%r12),@X[4],@X[4]
+	vpshufb	$t2,@X[3],@X[3]
+	vinserti128	\$1,80(%r12),@X[5],@X[5]
+	vpshufb	$t2,@X[4],@X[4]
+	vinserti128	\$1,96(%r12),@X[6],@X[6]
+	vpshufb	$t2,@X[5],@X[5]
+	vinserti128	\$1,112(%r12),@X[7],@X[7]
+
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t2,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t2,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x20(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x40(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x60(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x40(%rsp)
+	xor	$C,$a3		# magic
+	vmovdqa	$t3,0x60(%rsp)
+	mov	$F,$a4
+	add	\$16*2*$SZ,$Tbl
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+# Emit one SHA-512/AVX2 double-round on ymm registers.  Every fourth
+# call shifts %rsp down by $PUSH8 (see AVX2_256_00_47).  Xupdate
+# statements ending in ';' are grouped with the next one rather than
+# getting their own scalar-instruction slot — that keeps the 48 round
+# instructions spread over the 23 vector statements.
+# Args: $j (0..7), $body (\&bodyx_00_15), @X (eight ymm regs).
+sub AVX2_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 48 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%4)==0);
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    if ($_ !~ /\;$/) {
+		eval(shift(@insns));
+		eval(shift(@insns));
+		eval(shift(@insns));
+	    }
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+# Eight double-rounds, sentinel test, final 16 rounds; then the tail:
+# digest update, and a second pass (.Lower_avx2) that reuses the W+K
+# still on the stack to process the block loaded into the upper ymm
+# lanes (two-blocks-per-iteration scheme).
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX2_512_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1-0x80)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),$Tbl
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	cmp	`$PUSH8+2*8`($Tbl),$inp	# $_end
+	je	.Ldone_avx2
+
+	xor	$a1,$a1
+	mov	$B,$a3
+	xor	$C,$a3			# magic
+	mov	$F,$a4
+	jmp	.Lower_avx2
+.align	16
+.Lower_avx2:
+___
+# Second pass over the stacked W+K: eight rounds per iteration, walking
+# $Tbl back down the frame until it meets %rsp.
+    for ($i=0; $i<8; ) {
+	my $base="+16($Tbl)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+$code.=<<___;
+	lea	-$PUSH8($Tbl),$Tbl
+	cmp	%rsp,$Tbl
+	jae	.Lower_avx2
+
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),%rsp
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	lea	`2*16*$SZ`($inp),$inp	# inp+=2
+	add	$SZ*6($ctx),$G
+	mov	$inp,%r12
+	add	$SZ*7($ctx),$H
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	cmove	%rsp,%r12		# next block or stale data
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	jbe	.Loop_avx2
+	lea	(%rsp),$Tbl
+
+.Ldone_avx2:
+	lea	($Tbl),%rsp
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx2:
+	ret
+.size	${func}_avx2,.-${func}_avx2
+___
+}}
+}}}}}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HanderlData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+___
+$code.=<<___ if ($avx>1);
+ lea .Lavx2_shortcut(%rip),%r10
+ cmp %r10,%rbx # context->Rip<avx2_shortcut
+ jb .Lnot_in_avx2
+
+ and \$-256*$SZ,%rax
+ add \$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+ mov %rax,%rsi # put aside Rsp
+ mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx
+ jb .Lin_prologue # non-AVX code
+
+ lea 16*$SZ+4*8(%rsi),%rsi # Xmm6- save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$`$SZ==4?8:12`,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame