# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the amount of writes.
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium and ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's engaged only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge, where
# the folded loop is approximately as fast...
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# Add support for Intel SHA Extensions.
# Performance in clock cycles per processed byte (less is better):
#
#			gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium		46	57	40/38		-	-
# PIII			36	33	27/24		-	-
# AMD K8		27	25	19/15.5		-	14.9
# Core2			26	23	18/15.6		14.3	13.8
# Westmere		27	-	19/15.7		13.4	12.3
# Sandy Bridge		25	-	15.9		12.4	11.6
# Ivy Bridge		24	-	15.0		11.4	10.3
# Haswell		22	-	13.9		9.46	7.80
# Bulldozer		36	-	27/22		17.0	13.6
# VIA Nano		36	-	25/22		16.8	16.5
# Atom			50	-	30/25		21.9	18.9
# Silvermont		40	-	34/31		22.9	20.6
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
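# ($avx ends up 0, 1 or 2: 0 - no AVX code, 1 - the AVX+shrd path,
# 2 - additionally the AVX+BMI path, depending on what the assembler
# probed above can digest.)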
$shaext=$xmm;	### set to zero if compiling for 1.0.1
$unroll_after = 64*4;	# If pre-evicted from L1P cache, the first spin
			# of the fully unrolled loop was measured to run
			# about 3-4x slower. If the slowdown coefficient
			# is N and the unrolled loop is m times faster,
			# you break even at (N-1)/(m-1) blocks. That then
			# has to be adjusted for the probability of the
			# code being evicted, code size/cache size=1/4.
			# Typical m is 1.15...
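# (Editor's note: with the numbers just quoted, N~3.5 and m~1.15,
# (N-1)/(m-1) ~= 16.7 blocks; scaled by the 1/4 eviction probability
# that is ~4 blocks, i.e. 4 blocks * 64 bytes = 64*4, the threshold
# chosen above.)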
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");

sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	($T,"ecx");			# "ecx" is preloaded
	&mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&xor	($T,"ecx");			# T = sigma0(X[-15])
	&add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	&add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	&mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	&mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&mov	($Eoff,$E);			# modulo-scheduled
	&add	($T,$Hoff);			# T += h
	&xor	("esi","edi");			# Ch(e,f,g)
	&ror	($E,6);				# Sigma1(e)
	&add	($T,"esi");			# T += Ch(e,f,g)
	&add	($T,$E);			# T += Sigma1(e)
	&mov	($Aoff,$A);			# modulo-scheduled
	&lea	("esp",&DWP(-4,"esp"));
	&mov	("esi",&DWP(0,$K256));
	&mov	($E,$Eoff);			# e in next iteration, d in this one
	&xor	($A,"edi");			# a ^= b
	&ror	("ecx",2);			# Sigma0(a)
	&add	($T,"esi");			# T += K[i]
	&mov	(&DWP(0,"esp"),$A);		# (b^c) in next round
	&add	($E,$T);			# d += T
	&and	($A,&DWP(4,"esp"));		# a &= (b^c)
	&add	($T,"ecx");			# T += Sigma0(a)
	&xor	($A,"edi");			# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($A,$T);			# h += T
}
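# (Editor's sketch, not part of the emitted code: the Maj computation in
# BODY_00_15 above uses the identity Maj(a,b,c) = Ch(a^b,c,b) =
# ((a^b)&(b^c))^b, which is what lets every round reuse the previous
# round's (b^c) value from the stack. It can be brute-force verified
# with plain Perl:
#
#	for my $v (0..7) {
#	    my ($a,$b,$c) = (($v>>2)&1,($v>>1)&1,$v&1);
#	    my $maj = ($a&$b)^($a&$c)^($b&$c);
#	    die "Maj identity broken" if ($maj != ((($a^$b)&($b^$c))^$b));
#	}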
&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp
	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
	if ($xmm) {
		&je	(&label("AVX"))		if ($avx);
		&test	("ebx",1<<9);		# check for SSSE3
		&jnz	(&label("SSSE3"));
	} else {
		&je	(&label("loop_shrd"));
	}
	if ($unroll_after) {
&set_label("no_xmm");
		&sub	("eax","edi");
		&cmp	("eax",$unroll_after);
		&jae	(&label("unrolled"));
	}
	&jmp	(&label("loop"));
}
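# (Run-time dispatch so far: SHA Extensions if present, else AVX+BMI,
# else AVX, else SSSE3 on capable Intel parts, with the unrolled or
# compact integer loops - and loop_shrd in non-SSE2 builds - covering
# everything else, P4 included.)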
sub COMPACT_LOOP()
{ my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
	# copy input block to stack reversing byte and dword order
	for($i=0;$i<4;$i++) {
		&mov	("eax",&DWP($i*16+0,"edi"));
		&mov	("ebx",&DWP($i*16+4,"edi"));
		&mov	("ecx",&DWP($i*16+8,"edi"));
		&bswap	("eax");
		&mov	("edx",&DWP($i*16+12,"edi"));
		&bswap	("ebx");
		&push	("eax");
		&bswap	("ecx");
		&push	("ebx");
		&bswap	("edx");
		&push	("ecx");
		&push	("edx");
	}
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));	# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
&set_label("00_15$suffix",16);
		&BODY_00_15();
	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);
		&BODY_00_15(1);
	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));
	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));	# ctx
	# &mov	("edi",$Coff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));	# inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	&lea	("esp",&DWP(4*(9+16+64),"esp"));	# destroy frame
	&sub	($K256,4*64);				# rewind K

	&cmp	("edi",&DWP(8,"esp"));			# are we done yet?
	&jb	(&label("loop$suffix"));

	&mov	("esp",&DWP(12,"esp"));			# restore sp
if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
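	# (shrd with coinciding destination and source performs the same
	# rotate as ror; that substitution alone is what the ~20% quoted
	# above refers to.)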
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));	# restore sp
&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
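# (Placing the table in the code segment lets the pic_point trick above
# reach K256 and the byte-swap mask with a position-independent lea,
# with no relocation or GOT traffic in PIC builds.)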
($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
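# (How the rotating frame works: a..h occupy eight fixed dword slots at
# esp, and instead of moving eight values every round, off() shifts the
# view as $i grows. E.g. at $i==1, off($a) = &DWP(4*((0-1)&7)) = the
# slot that was h a round earlier - the "rotate(a,h)" seen below.)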
if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));
&set_label("grand_loop",16);
	# copy input block to stack reversing byte order
	for($i=0;$i<5;$i++) {
		&mov	("ebx",&DWP(12*$i+0,"edi"));
		&mov	("ecx",&DWP(12*$i+4,"edi"));
		&bswap	("ebx");
		&mov	("esi",&DWP(12*$i+8,"edi"));
		&bswap	("ecx");
		&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
		&bswap	("esi");
		&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
		&mov	(&DWP(32+12*$i+8,"esp"),"esi");
	}
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");	# inp
	&mov	(&DWP(32+12*$i,"esp"),"ebx");
my ($t1,$t2) = ("ecx","esi");

	for ($i=0;$i<64;$i++) {

		&mov	($T,$t1);			# $t1 is preloaded
		# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
		&xor	($T,$t1);			# T = sigma0(X[-15])
		&add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
		&add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
		#&xor	("edi",$t2)			# sigma1(X[-2])
		# &add	($T,"edi");			# T += sigma1(X[-2])
		# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

		&xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
		&add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
		&mov	("edi",&off($g));
		&mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
		&mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
		&mov	(&off($e),$t1);			# save $E, modulo-scheduled
		&add	($T,&off($h));			# T += h
		&xor	("edi",$t2);			# Ch(e,f,g)
		&ror	($E,6);				# Sigma1(e)
		&add	($T,"edi");			# T += Ch(e,f,g)
		&mov	("edi",&off($b));
		&mov	(&off($a),$AH[0]);		# save $A, modulo-scheduled
		&xor	($AH[0],"edi");			# a ^= b, (b^c) in next round
		&and	($AH[1],$AH[0]);		# (b^c) &= (a^b)
		&lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
		&xor	($AH[1],"edi");			# h = Maj(a,b,c) = Ch(a^b,c,b)
		&mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
		&ror	($t1,2);			# Sigma0(a)
		&add	($AH[1],$E);			# h += T
		&add	($E,&off($d));			# d += T
		&add	($AH[1],$t1);			# h += Sigma0(a)
		&mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

		@AH = reverse(@AH);			# rotate(a,h)
		($t1,$t2) = ($t2,$t1);			# rotate(t1,t2)
	}
	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ebx");
	&mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
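# (The hand-assembled encoding above exists because assemblers of the
# time did not know the SHA instructions. As an illustration of what it
# emits: sha256rnds2 xmm1,xmm0 becomes 0x0f,0x38,0xcb,0xc8, with
# 0xc8 = 0xc0|(1<<3)|0, and xmm0 doubling as the instruction's implicit
# round-key operand.)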
&set_label("shaext",32);
	&sub	("esp",32);

	&movdqu	($ABEF,&QWP(0,$ctx));		# DCBA
	&lea	($K256,&DWP(0x80,$K256));
	&movdqu	($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa	($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd	($Wi,$ABEF,0x1b);		# ABCD
	&pshufd	($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd	($CDGH,$CDGH,0x1b);		# EFGH
	&palignr ($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq ($CDGH,$Wi);		# CDGH
	&jmp	(&label("loop_shaext"));
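	# (The pshufd/palignr/punpcklqdq sequence above repacks the
	# little-endian h[0..7] words into the {ABEF}/{CDGH} layout that
	# sha256rnds2 expects; the mirror-image shuffle after the loop
	# converts back.)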
&set_label("loop_shaext",16);
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$TMP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&movdqa	(&QWP(16,"esp"),$CDGH);	# offload

	&movdqa	($Wi,&QWP(0*16-0x80,$K256));
	&paddd	($Wi,@MSG[0]);
	&pshufb	(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 0-3
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	(&QWP(0,"esp"),$ABEF);	# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(1*16-0x80,$K256));
	&paddd	($Wi,@MSG[1]);
	&pshufb	(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 4-7
	&pshufd	($Wi,$Wi,0x0e);
	&lea	($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(2*16-0x80,$K256));
	&paddd	($Wi,@MSG[2]);
	&pshufb	(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 8-11
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&paddd	(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(3*16-0x80,$K256));
	&paddd	($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);		# 12-15
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&paddd	(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

	for($i=4;$i<16-3;$i++) {
		&movdqa	($Wi,&QWP($i*16-0x80,$K256));
		&paddd	($Wi,@MSG[0]);
		&sha256msg2	(@MSG[1],@MSG[0]);
		&sha256rnds2	($CDGH,$ABEF);		# 16-19...
		&pshufd	($Wi,$Wi,0x0e);
		&movdqa	($TMP,@MSG[1]);
		&palignr	($TMP,@MSG[0],4);
		&paddd	(@MSG[2],$TMP);
		&sha256msg1	(@MSG[3],@MSG[0]);
		&sha256rnds2	($ABEF,$CDGH);

		push(@MSG,shift(@MSG));
	}
	&movdqa	($Wi,&QWP(13*16-0x80,$K256));
	&paddd	($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);		# 52-55
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd	(@MSG[2],$TMP);

	&movdqa	($Wi,&QWP(14*16-0x80,$K256));
	&paddd	($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);		# 56-59
	&pshufd	($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa	($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(15*16-0x80,$K256));
	&paddd	($Wi,@MSG[2]);
	&sha256rnds2	($CDGH,$ABEF);		# 60-63
	&pshufd	($Wi,$Wi,0x0e);
	&cmp	($end,$inp);			# are we done yet?
	&sha256rnds2	($ABEF,$CDGH);

	&paddd	($CDGH,&QWP(16,"esp"));
	&paddd	($ABEF,&QWP(0,"esp"));
	&jnz	(&label("loop_shaext"));
	&pshufd	($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd	($TMP,$ABEF,0x1b);		# FEBA
	&pshufd	($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);		# DCBA
	&palignr	($CDGH,$TMP,8);		# HGFE

	&mov	("esp",&DWP(32+12,"esp"));
	&movdqu	(&QWP(0,$ctx),$ABEF);
	&movdqu	(&QWP(16,$ctx),$CDGH);
	&ret	();
}
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));
&set_label("ssse3_00_47",16);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
	my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
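	# (The code below interleaves the vector message-schedule update
	# with the scalar round bodies collected in @insns - a couple of
	# scalar instructions eval'ed per vector one - so that both
	# execution domains stay busy; @insns holds four rounds' worth of
	# instruction strings.)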
	  eval(shift(@insns));		# @
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));		# @
	&palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));		# @
	&pshufd		($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));		# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&pshufd		($t3,$t3,0b10000000);
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&pshufd		($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&pshufd		($t3,$t3,0b00001000);
	  eval(shift(@insns));		# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));		# @
	  eval(shift(@insns));		# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));		# @

	foreach (@insns) { eval; }		# remaining instructions

	&paddd		($t2,@X[0]);
	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}
sub body_00_15 () {
	(
	'&mov	("esi",&off($f));',
	'&mov	("edi",&off($g));',
	'&xor	("esi","edi");',
	'&and	("esi","ecx");',
	'&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	'&mov	("ecx",$AH[0]);',
	'&add	($E,"edi");',		# T += Ch(e,f,g)
	'&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',
	'&ror	("ecx",22-13);',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	'&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	'&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	'&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	'&add	($AH[1],$E);',		# h += T
	'&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }
	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&set_label("AVX",32);
	if ($avx>1) {
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
	}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_00_47"));
&set_label("avx_00_47",16);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	'&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	'&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	'&vpxor		($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor		($t2,$t2,$t3);',	# sigma1(X[14..15])
	'&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,10);',
	'&vpsrlq	($t1,$t3,17);',
	'&vpxor		($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor		($t2,$t2,$t3);',	# sigma1(X[16..17])
	'&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}
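# (Reading aid, mapping the above to the spec: sigma0(x) =
# ROTR7(x)^ROTR18(x)^SHR3(x) is assembled from vpsrld by 7 and 18-7
# more, vpslld by 14 and 25-14 more, plus vpsrld by 3; sigma1(x) =
# ROTR17(x)^ROTR19(x)^SHR10(x) uses vpsrlq 17/19 on qword lanes plus
# vpsrld 10, two dwords at a time, hence the vpshufd repacking.)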
local *ror = sub { &shrd(@_[0],@_) };

sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
	my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
	my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}
    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }
	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	'&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	'&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	'&and	($E,&off($f))',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	'&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	'&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	'&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h))',		# T += h
	'&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	'&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	'&add	("ecx",$E)',		# h += T
	'&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
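# (Why this body is ~10% faster where BMI is available: rorx computes
# the rotates without touching the flags, removing ror's flag
# dependencies, and andn delivers the ~e&g half of Ch in one
# instruction; Ch is rebuilt here as (e&f)|(~e&g), equivalent to the
# usual xor form because the two masks are disjoint.)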
&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_bmi_00_47"));
&set_label("avx_bmi_00_47",16);
    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}}}
&function_end_B("sha256_block_data_order");