1 #! /usr/bin/env perl
2 # Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # March, June 2010
18 #
19 # The module implements the "4-bit" GCM GHASH function and the underlying
20 # single multiplication operation in GF(2^128). "4-bit" means that
21 # it uses a 256-byte per-key table [+128 bytes of shared table]. The GHASH
22 # function features a so-called "528B" variant utilizing an additional
23 # 256+16 bytes of per-key storage [+512 bytes of shared table].
24 # Performance results are for this streamed GHASH subroutine and are
25 # expressed in cycles per processed byte (lower is better):
26 #
27 #               gcc 3.4.x(*)    assembler       gain
28 #
29 # P4            28.6            14.0            +100%
30 # Opteron       19.3            7.7             +150%
31 # Core2         17.8            8.1(**)         +120%
32 # Atom          31.6            16.8            +88%
33 # VIA Nano      21.8            10.1            +115%
34 #
35 # (*)   comparison is not completely fair, because C results are
36 #       for vanilla "256B" implementation, while assembler results
37 #       are for "528B";-)
38 # (**)  it's a mystery [to me] why the Core2 result is not the same
39 #       as for Opteron;
40
41 # May 2010
42 #
43 # Add a PCLMULQDQ version performing at 2.02 cycles per processed byte.
44 # See ghash-x86.pl for background information and details about coding
45 # techniques.
46 #
47 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
48 # providing access to a Westmere-based system on behalf of Intel
49 # Open Source Technology Centre.
50
51 # December 2012
52 #
53 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
54 # reduction_alg9, increase reduction aggregate factor to 4x. As for
55 # the latter: ghash-x86.pl explains why it makes less sense to
56 # increase the aggregate factor there. Then why increase here? Critical path
57 # consists of 3 independent pclmulqdq instructions, Karatsuba post-
58 # processing and reduction. "On top" of this we lay down aggregated
59 # multiplication operations, triplets of independent pclmulqdq's. As
60 # the issue rate for pclmulqdq is limited, it makes less sense to
61 # aggregate more multiplications than it takes to perform remaining
62 # non-multiplication operations. 2x is near-optimal coefficient for
63 # contemporary Intel CPUs (therefore modest improvement coefficient),
64 # but not for Bulldozer. The latter is because logical SIMD operations
65 # are twice as slow as on Intel, so the critical path is
66 # longer. A CPU with higher pclmulqdq issue rate would also benefit
67 # from higher aggregate factor...
68 #
69 # Westmere      1.78(+13%)
70 # Sandy Bridge  1.80(+8%)
71 # Ivy Bridge    1.80(+7%)
72 # Haswell       0.55(+93%) (if system doesn't support AVX)
73 # Broadwell     0.45(+110%)(if system doesn't support AVX)
74 # Skylake       0.44(+110%)(if system doesn't support AVX)
75 # Bulldozer     1.49(+27%)
76 # Silvermont    2.88(+13%)
77 # Goldmont      1.08(+24%)
78
79 # March 2013
80 #
81 # ... The 8x aggregate factor AVX code path uses the reduction algorithm
82 # suggested by Shay Gueron[1]. Even though contemporary AVX-capable
83 # CPUs such as Sandy and Ivy Bridge can execute it, the code performs
84 # sub-optimally compared to the above-mentioned version. But thanks
85 # to Ilya Albrekht and Max Locktyukhin of Intel Corp. we know that
86 # it performs at 0.41 cycles per byte on Haswell, at
87 # 0.29 on Broadwell, and at 0.36 on Skylake.
88 #
89 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
90
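# For reference, GHASH's underlying operation can be cross-checked against a
# plain bit-by-bit GF(2^128) multiplication as specified for GCM. The helper
# below is a minimal sketch and is not used when generating the .s output;
# the name gf128_mul_ref and its 16-byte-string interface are illustrative
# only. It relies on Math::BigInt from core Perl.

use Math::BigInt;

sub gf128_mul_ref {                             # reference-only helper
    my ($x,$y) = @_;                            # two 16-byte blocks as byte strings
    my $X = Math::BigInt->from_hex(unpack("H32",$x));
    my $Y = Math::BigInt->from_hex(unpack("H32",$y));
    my $R = Math::BigInt->from_hex("e1000000000000000000000000000000");
    my $Z = Math::BigInt->bzero();
    my $V = $X->copy();

    for my $i (0..127) {                        # GCM bit i is integer bit 127-i
        $Z->bxor($V) if $Y->copy()->brsft(127-$i)->band(1)->is_one();
        my $lsb = $V->copy()->band(1)->is_one();
        $V->brsft(1);                           # GCM "rightshift"
        $V->bxor($R) if $lsb;                   # fold in 0xE1||0^120 on carry-out
    }
    my $hex = $Z->as_hex(); $hex =~ s/^0x//;
    return pack("H32",substr("0"x32 . $hex,-32));
}
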
91 $flavour = shift;
92 $output  = shift;
93 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
94
95 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
96
97 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
98 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
99 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
100 die "can't locate x86_64-xlate.pl";
101
102 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
103                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
104         $avx = ($1>=2.20) + ($1>=2.22);
105 }
106
107 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
108             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
109         $avx = ($1>=2.09) + ($1>=2.10);
110 }
111
112 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
113             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
114         $avx = ($1>=10) + ($1>=11);
115 }
116
117 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
118         $avx = ($2>=3.0) + ($2>3.0);
119 }
120
121 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
122 *STDOUT=*OUT;
123
124 $do4xaggr=1;
125
126 # common register layout
127 $nlo="%rax";
128 $nhi="%rbx";
129 $Zlo="%r8";
130 $Zhi="%r9";
131 $tmp="%r10";
132 $rem_4bit = "%r11";
133
134 $Xi="%rdi";
135 $Htbl="%rsi";
136
137 # per-function register layout
138 $cnt="%rcx";
139 $rem="%rdx";
140
141 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/     or
142                         $r =~ s/%[er]([sd]i)/%\1l/      or
143                         $r =~ s/%[er](bp)/%\1l/         or
144                         $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
145
146 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
147 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
148   my $arg = pop;
149     $arg = "\$$arg" if ($arg*1 eq $arg);
150     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
151 }
152 \f
153 { my $N;
154   sub loop() {
155   my $inp = shift;
156
157         $N++;
158 $code.=<<___;
159         xor     $nlo,$nlo
160         xor     $nhi,$nhi
161         mov     `&LB("$Zlo")`,`&LB("$nlo")`
162         mov     `&LB("$Zlo")`,`&LB("$nhi")`
163         shl     \$4,`&LB("$nlo")`
164         mov     \$14,$cnt
165         mov     8($Htbl,$nlo),$Zlo
166         mov     ($Htbl,$nlo),$Zhi
167         and     \$0xf0,`&LB("$nhi")`
168         mov     $Zlo,$rem
169         jmp     .Loop$N
170
171 .align  16
172 .Loop$N:
173         shr     \$4,$Zlo
174         and     \$0xf,$rem
175         mov     $Zhi,$tmp
176         mov     ($inp,$cnt),`&LB("$nlo")`
177         shr     \$4,$Zhi
178         xor     8($Htbl,$nhi),$Zlo
179         shl     \$60,$tmp
180         xor     ($Htbl,$nhi),$Zhi
181         mov     `&LB("$nlo")`,`&LB("$nhi")`
182         xor     ($rem_4bit,$rem,8),$Zhi
183         mov     $Zlo,$rem
184         shl     \$4,`&LB("$nlo")`
185         xor     $tmp,$Zlo
186         dec     $cnt
187         js      .Lbreak$N
188
189         shr     \$4,$Zlo
190         and     \$0xf,$rem
191         mov     $Zhi,$tmp
192         shr     \$4,$Zhi
193         xor     8($Htbl,$nlo),$Zlo
194         shl     \$60,$tmp
195         xor     ($Htbl,$nlo),$Zhi
196         and     \$0xf0,`&LB("$nhi")`
197         xor     ($rem_4bit,$rem,8),$Zhi
198         mov     $Zlo,$rem
199         xor     $tmp,$Zlo
200         jmp     .Loop$N
201
202 .align  16
203 .Lbreak$N:
204         shr     \$4,$Zlo
205         and     \$0xf,$rem
206         mov     $Zhi,$tmp
207         shr     \$4,$Zhi
208         xor     8($Htbl,$nlo),$Zlo
209         shl     \$60,$tmp
210         xor     ($Htbl,$nlo),$Zhi
211         and     \$0xf0,`&LB("$nhi")`
212         xor     ($rem_4bit,$rem,8),$Zhi
213         mov     $Zlo,$rem
214         xor     $tmp,$Zlo
215
216         shr     \$4,$Zlo
217         and     \$0xf,$rem
218         mov     $Zhi,$tmp
219         shr     \$4,$Zhi
220         xor     8($Htbl,$nhi),$Zlo
221         shl     \$60,$tmp
222         xor     ($Htbl,$nhi),$Zhi
223         xor     $tmp,$Zlo
224         xor     ($rem_4bit,$rem,8),$Zhi
225
226         bswap   $Zlo
227         bswap   $Zhi
228 ___
229 }}
230
231 $code=<<___;
232 .text
233 .extern OPENSSL_ia32cap_P
234
235 .globl  gcm_gmult_4bit
236 .type   gcm_gmult_4bit,\@function,2
237 .align  16
238 gcm_gmult_4bit:
239         push    %rbx
240         push    %rbp            # %rbp and %r12 are pushed exclusively in
241         push    %r12            # order to reuse Win64 exception handler...
242 .Lgmult_prologue:
243
244         movzb   15($Xi),$Zlo
245         lea     .Lrem_4bit(%rip),$rem_4bit
246 ___
247         &loop   ($Xi);
248 $code.=<<___;
249         mov     $Zlo,8($Xi)
250         mov     $Zhi,($Xi)
251
252         mov     16(%rsp),%rbx
253         lea     24(%rsp),%rsp
254 .Lgmult_epilogue:
255         ret
256 .size   gcm_gmult_4bit,.-gcm_gmult_4bit
257 ___
258 \f
259 # per-function register layout
260 $inp="%rdx";
261 $len="%rcx";
262 $rem_8bit=$rem_4bit;
263
264 $code.=<<___;
265 .globl  gcm_ghash_4bit
266 .type   gcm_ghash_4bit,\@function,4
267 .align  16
268 gcm_ghash_4bit:
269         push    %rbx
270         push    %rbp
271         push    %r12
272         push    %r13
273         push    %r14
274         push    %r15
275         sub     \$280,%rsp
276 .Lghash_prologue:
277         mov     $inp,%r14               # reassign couple of args
278         mov     $len,%r15
279 ___
280 { my $inp="%r14";
281   my $dat="%edx";
282   my $len="%r15";
283   my @nhi=("%ebx","%ecx");
284   my @rem=("%r12","%r13");
285   my $Hshr4="%rbp";
286
287         &sub    ($Htbl,-128);           # size optimization
288         &lea    ($Hshr4,"16+128(%rsp)");
289         { my @lo =($nlo,$nhi);
290           my @hi =($Zlo,$Zhi);
291
292           &xor  ($dat,$dat);
293           for ($i=0,$j=-2;$i<18;$i++,$j++) {
294             &mov        ("$j(%rsp)",&LB($dat))          if ($i>1);
295             &or         ($lo[0],$tmp)                   if ($i>1);
296             &mov        (&LB($dat),&LB($lo[1]))         if ($i>0 && $i<17);
297             &shr        ($lo[1],4)                      if ($i>0 && $i<17);
298             &mov        ($tmp,$hi[1])                   if ($i>0 && $i<17);
299             &shr        ($hi[1],4)                      if ($i>0 && $i<17);
300             &mov        ("8*$j($Hshr4)",$hi[0])         if ($i>1);
301             &mov        ($hi[0],"16*$i+0-128($Htbl)")   if ($i<16);
302             &shl        (&LB($dat),4)                   if ($i>0 && $i<17);
303             &mov        ("8*$j-128($Hshr4)",$lo[0])     if ($i>1);
304             &mov        ($lo[0],"16*$i+8-128($Htbl)")   if ($i<16);
305             &shl        ($tmp,60)                       if ($i>0 && $i<17);
306
307             push        (@lo,shift(@lo));
308             push        (@hi,shift(@hi));
309           }
310         }
311         &add    ($Htbl,-128);
312         &mov    ($Zlo,"8($Xi)");
313         &mov    ($Zhi,"0($Xi)");
314         &add    ($len,$inp);            # pointer to the end of data
315         &lea    ($rem_8bit,".Lrem_8bit(%rip)");
316         &jmp    (".Louter_loop");
317
318 $code.=".align  16\n.Louter_loop:\n";
319         &xor    ($Zhi,"($inp)");
320         &mov    ("%rdx","8($inp)");
321         &lea    ($inp,"16($inp)");
322         &xor    ("%rdx",$Zlo);
323         &mov    ("($Xi)",$Zhi);
324         &mov    ("8($Xi)","%rdx");
325         &shr    ("%rdx",32);
326
327         &xor    ($nlo,$nlo);
328         &rol    ($dat,8);
329         &mov    (&LB($nlo),&LB($dat));
330         &movz   ($nhi[0],&LB($dat));
331         &shl    (&LB($nlo),4);
332         &shr    ($nhi[0],4);
333
334         for ($j=11,$i=0;$i<15;$i++) {
335             &rol        ($dat,8);
336             &xor        ($Zlo,"8($Htbl,$nlo)")                  if ($i>0);
337             &xor        ($Zhi,"($Htbl,$nlo)")                   if ($i>0);
338             &mov        ($Zlo,"8($Htbl,$nlo)")                  if ($i==0);
339             &mov        ($Zhi,"($Htbl,$nlo)")                   if ($i==0);
340
341             &mov        (&LB($nlo),&LB($dat));
342             &xor        ($Zlo,$tmp)                             if ($i>0);
343             &movzw      ($rem[1],"($rem_8bit,$rem[1],2)")       if ($i>0);
344
345             &movz       ($nhi[1],&LB($dat));
346             &shl        (&LB($nlo),4);
347             &movzb      ($rem[0],"(%rsp,$nhi[0])");
348
349             &shr        ($nhi[1],4)                             if ($i<14);
350             &and        ($nhi[1],0xf0)                          if ($i==14);
351             &shl        ($rem[1],48)                            if ($i>0);
352             &xor        ($rem[0],$Zlo);
353
354             &mov        ($tmp,$Zhi);
355             &xor        ($Zhi,$rem[1])                          if ($i>0);
356             &shr        ($Zlo,8);
357
358             &movz       ($rem[0],&LB($rem[0]));
359             &mov        ($dat,"$j($Xi)")                        if (--$j%4==0);
360             &shr        ($Zhi,8);
361
362             &xor        ($Zlo,"-128($Hshr4,$nhi[0],8)");
363             &shl        ($tmp,56);
364             &xor        ($Zhi,"($Hshr4,$nhi[0],8)");
365
366             unshift     (@nhi,pop(@nhi));               # "rotate" registers
367             unshift     (@rem,pop(@rem));
368         }
369         &movzw  ($rem[1],"($rem_8bit,$rem[1],2)");
370         &xor    ($Zlo,"8($Htbl,$nlo)");
371         &xor    ($Zhi,"($Htbl,$nlo)");
372
373         &shl    ($rem[1],48);
374         &xor    ($Zlo,$tmp);
375
376         &xor    ($Zhi,$rem[1]);
377         &movz   ($rem[0],&LB($Zlo));
378         &shr    ($Zlo,4);
379
380         &mov    ($tmp,$Zhi);
381         &shl    (&LB($rem[0]),4);
382         &shr    ($Zhi,4);
383
384         &xor    ($Zlo,"8($Htbl,$nhi[0])");
385         &movzw  ($rem[0],"($rem_8bit,$rem[0],2)");
386         &shl    ($tmp,60);
387
388         &xor    ($Zhi,"($Htbl,$nhi[0])");
389         &xor    ($Zlo,$tmp);
390         &shl    ($rem[0],48);
391
392         &bswap  ($Zlo);
393         &xor    ($Zhi,$rem[0]);
394
395         &bswap  ($Zhi);
396         &cmp    ($inp,$len);
397         &jb     (".Louter_loop");
398 }
399 $code.=<<___;
400         mov     $Zlo,8($Xi)
401         mov     $Zhi,($Xi)
402
403         lea     280(%rsp),%rsi
404         mov     0(%rsi),%r15
405         mov     8(%rsi),%r14
406         mov     16(%rsi),%r13
407         mov     24(%rsi),%r12
408         mov     32(%rsi),%rbp
409         mov     40(%rsi),%rbx
410         lea     48(%rsi),%rsp
411 .Lghash_epilogue:
412         ret
413 .size   gcm_ghash_4bit,.-gcm_ghash_4bit
414 ___
415 \f
416 ######################################################################
417 # PCLMULQDQ version.
418
419 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
420                 ("%rdi","%rsi","%rdx","%rcx");  # Unix order
421
422 ($Xi,$Xhi)=("%xmm0","%xmm1");   $Hkey="%xmm2";
423 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
424
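# clmul64x64_T2 computes the full carry-less product of $Xi and $Hkey with
# just three pclmulqdq instructions via Karatsuba: lo = Xi.lo·H.lo,
# hi = Xi.hi·H.hi, mid = (Xi.lo^Xi.hi)·(H.lo^H.hi) ^ lo ^ hi, and then
# folds the two halves of mid into the 256-bit result $Xhi:$Xi. When the
# caller supplies $HK it is expected to carry the precomputed H.lo^H.hi in
# its low quadword, so the pshufd/pxor pre-processing of $Hkey is skipped.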
425 sub clmul64x64_T2 {     # minimal register pressure
426 my ($Xhi,$Xi,$Hkey,$HK)=@_;
427
428 if (!defined($HK)) {    $HK = $T2;
429 $code.=<<___;
430         movdqa          $Xi,$Xhi                #
431         pshufd          \$0b01001110,$Xi,$T1
432         pshufd          \$0b01001110,$Hkey,$T2
433         pxor            $Xi,$T1                 #
434         pxor            $Hkey,$T2
435 ___
436 } else {
437 $code.=<<___;
438         movdqa          $Xi,$Xhi                #
439         pshufd          \$0b01001110,$Xi,$T1
440         pxor            $Xi,$T1                 #
441 ___
442 }
443 $code.=<<___;
444         pclmulqdq       \$0x00,$Hkey,$Xi        #######
445         pclmulqdq       \$0x11,$Hkey,$Xhi       #######
446         pclmulqdq       \$0x00,$HK,$T1          #######
447         pxor            $Xi,$T1                 #
448         pxor            $Xhi,$T1                #
449
450         movdqa          $T1,$T2                 #
451         psrldq          \$8,$T1
452         pslldq          \$8,$T2                 #
453         pxor            $T1,$Xhi
454         pxor            $T2,$Xi                 #
455 ___
456 }
457
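# reduction_alg9 folds the upper 128 bits of the double-width product held
# in $Xhi:$Xi back into the lower half modulo the GCM polynomial
# x^128+x^7+x^2+x+1, in two phases of shift-and-XOR, leaving the reduced
# 128-bit result in $Xi.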
458 sub reduction_alg9 {    # 17/11 times faster than Intel version
459 my ($Xhi,$Xi) = @_;
460
461 $code.=<<___;
462         # 1st phase
463         movdqa          $Xi,$T2                 #
464         movdqa          $Xi,$T1
465         psllq           \$5,$Xi
466         pxor            $Xi,$T1                 #
467         psllq           \$1,$Xi
468         pxor            $T1,$Xi                 #
469         psllq           \$57,$Xi                #
470         movdqa          $Xi,$T1                 #
471         pslldq          \$8,$Xi
472         psrldq          \$8,$T1                 #       
473         pxor            $T2,$Xi
474         pxor            $T1,$Xhi                #
475
476         # 2nd phase
477         movdqa          $Xi,$T2
478         psrlq           \$1,$Xi
479         pxor            $T2,$Xhi                #
480         pxor            $Xi,$T2
481         psrlq           \$5,$Xi
482         pxor            $T2,$Xi                 #
483         psrlq           \$1,$Xi                 #
484         pxor            $Xhi,$Xi                #
485 ___
486 }
487 \f
488 { my ($Htbl,$Xip)=@_4args;
489   my $HK="%xmm6";
490
491 $code.=<<___;
492 .globl  gcm_init_clmul
493 .type   gcm_init_clmul,\@abi-omnipotent
494 .align  16
495 gcm_init_clmul:
496 .L_init_clmul:
497 ___
498 $code.=<<___ if ($win64);
499 .LSEH_begin_gcm_init_clmul:
500         # I can't trust assembler to use specific encoding:-(
501         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
502         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
503 ___
504 $code.=<<___;
505         movdqu          ($Xip),$Hkey
506         pshufd          \$0b01001110,$Hkey,$Hkey        # dword swap
507
508         # <<1 twist
509         pshufd          \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
510         movdqa          $Hkey,$T1
511         psllq           \$1,$Hkey
512         pxor            $T3,$T3                 #
513         psrlq           \$63,$T1
514         pcmpgtd         $T2,$T3                 # broadcast carry bit
515         pslldq          \$8,$T1
516         por             $T1,$Hkey               # H<<=1
517
518         # magic reduction
519         pand            .L0x1c2_polynomial(%rip),$T3
520         pxor            $T3,$Hkey               # if(carry) H^=0x1c2_polynomial
521
522         # calculate H^2
523         pshufd          \$0b01001110,$Hkey,$HK
524         movdqa          $Hkey,$Xi
525         pxor            $Hkey,$HK
526 ___
527         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);
528         &reduction_alg9 ($Xhi,$Xi);
529 $code.=<<___;
530         pshufd          \$0b01001110,$Hkey,$T1
531         pshufd          \$0b01001110,$Xi,$T2
532         pxor            $Hkey,$T1               # Karatsuba pre-processing
533         movdqu          $Hkey,0x00($Htbl)       # save H
534         pxor            $Xi,$T2                 # Karatsuba pre-processing
535         movdqu          $Xi,0x10($Htbl)         # save H^2
536         palignr         \$8,$T1,$T2             # low part is H.lo^H.hi...
537         movdqu          $T2,0x20($Htbl)         # save Karatsuba "salt"
538 ___
539 if ($do4xaggr) {
540         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^3
541         &reduction_alg9 ($Xhi,$Xi);
542 $code.=<<___;
543         movdqa          $Xi,$T3
544 ___
545         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^4
546         &reduction_alg9 ($Xhi,$Xi);
547 $code.=<<___;
548         pshufd          \$0b01001110,$T3,$T1
549         pshufd          \$0b01001110,$Xi,$T2
550         pxor            $T3,$T1                 # Karatsuba pre-processing
551         movdqu          $T3,0x30($Htbl)         # save H^3
552         pxor            $Xi,$T2                 # Karatsuba pre-processing
553         movdqu          $Xi,0x40($Htbl)         # save H^4
554         palignr         \$8,$T1,$T2             # low part is H^3.lo^H^3.hi...
555         movdqu          $T2,0x50($Htbl)         # save Karatsuba "salt"
556 ___
557 }
558 $code.=<<___ if ($win64);
559         movaps  (%rsp),%xmm6
560         lea     0x18(%rsp),%rsp
561 .LSEH_end_gcm_init_clmul:
562 ___
563 $code.=<<___;
564         ret
565 .size   gcm_init_clmul,.-gcm_init_clmul
566 ___
567 }
568
569 { my ($Xip,$Htbl)=@_4args;
570
571 $code.=<<___;
572 .globl  gcm_gmult_clmul
573 .type   gcm_gmult_clmul,\@abi-omnipotent
574 .align  16
575 gcm_gmult_clmul:
576 .L_gmult_clmul:
577         movdqu          ($Xip),$Xi
578         movdqa          .Lbswap_mask(%rip),$T3
579         movdqu          ($Htbl),$Hkey
580         movdqu          0x20($Htbl),$T2
581         pshufb          $T3,$Xi
582 ___
583         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
584 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
585         # experimental alternative. the special thing about it is that there
586         # is no dependency between the two multiplications...
587         mov             \$`0xE1<<1`,%eax
588         mov             \$0xA040608020C0E000,%r10       # ((7..0)·0xE0)&0xff
589         mov             \$0x07,%r11d
590         movq            %rax,$T1
591         movq            %r10,$T2
592         movq            %r11,$T3                # borrow $T3
593         pand            $Xi,$T3
594         pshufb          $T3,$T2                 # ($Xi&7)·0xE0
595         movq            %rax,$T3
596         pclmulqdq       \$0x00,$Xi,$T1          # ·(0xE1<<1)
597         pxor            $Xi,$T2
598         pslldq          \$15,$T2
599         paddd           $T2,$T2                 # <<(64+56+1)
600         pxor            $T2,$Xi
601         pclmulqdq       \$0x01,$T3,$Xi
602         movdqa          .Lbswap_mask(%rip),$T3  # reload $T3
603         psrldq          \$1,$T1
604         pxor            $T1,$Xhi
605         pslldq          \$7,$Xi
606         pxor            $Xhi,$Xi
607 ___
608 $code.=<<___;
609         pshufb          $T3,$Xi
610         movdqu          $Xi,($Xip)
611         ret
612 .size   gcm_gmult_clmul,.-gcm_gmult_clmul
613 ___
614 }
615 \f
616 { my ($Xip,$Htbl,$inp,$len)=@_4args;
617   my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
618   my ($T1,$T2,$T3)=map("%xmm$_",(8..10));
619
620 $code.=<<___;
621 .globl  gcm_ghash_clmul
622 .type   gcm_ghash_clmul,\@abi-omnipotent
623 .align  32
624 gcm_ghash_clmul:
625 .L_ghash_clmul:
626 ___
627 $code.=<<___ if ($win64);
628         lea     -0x88(%rsp),%rax
629 .LSEH_begin_gcm_ghash_clmul:
630         # I can't trust assembler to use specific encoding:-(
631         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
632         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
633         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
634         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
635         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
636         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
637         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
638         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
639         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
640         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
641         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
642 ___
643 $code.=<<___;
644         movdqa          .Lbswap_mask(%rip),$T3
645
646         movdqu          ($Xip),$Xi
647         movdqu          ($Htbl),$Hkey
648         movdqu          0x20($Htbl),$HK
649         pshufb          $T3,$Xi
650
651         sub             \$0x10,$len
652         jz              .Lodd_tail
653
654         movdqu          0x10($Htbl),$Hkey2
655 ___
656 if ($do4xaggr) {
657 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
658
659 $code.=<<___;
660         mov             OPENSSL_ia32cap_P+4(%rip),%eax
661         cmp             \$0x30,$len
662         jb              .Lskip4x
663
664         and             \$`1<<26|1<<22`,%eax    # isolate MOVBE+XSAVE
665         cmp             \$`1<<22`,%eax          # check for MOVBE without XSAVE
666         je              .Lskip4x
667
668         sub             \$0x30,$len
669         mov             \$0xA040608020C0E000,%rax       # ((7..0)·0xE0)&0xff
670         movdqu          0x30($Htbl),$Hkey3
671         movdqu          0x40($Htbl),$Hkey4
672
673         #######
674         # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
675         #
676         movdqu          0x30($inp),$Xln
677          movdqu         0x20($inp),$Xl
678         pshufb          $T3,$Xln
679          pshufb         $T3,$Xl
680         movdqa          $Xln,$Xhn
681         pshufd          \$0b01001110,$Xln,$Xmn
682         pxor            $Xln,$Xmn
683         pclmulqdq       \$0x00,$Hkey,$Xln
684         pclmulqdq       \$0x11,$Hkey,$Xhn
685         pclmulqdq       \$0x00,$HK,$Xmn
686
687         movdqa          $Xl,$Xh
688         pshufd          \$0b01001110,$Xl,$Xm
689         pxor            $Xl,$Xm
690         pclmulqdq       \$0x00,$Hkey2,$Xl
691         pclmulqdq       \$0x11,$Hkey2,$Xh
692         pclmulqdq       \$0x10,$HK,$Xm
693         xorps           $Xl,$Xln
694         xorps           $Xh,$Xhn
695         movups          0x50($Htbl),$HK
696         xorps           $Xm,$Xmn
697
698         movdqu          0x10($inp),$Xl
699          movdqu         0($inp),$T1
700         pshufb          $T3,$Xl
701          pshufb         $T3,$T1
702         movdqa          $Xl,$Xh
703         pshufd          \$0b01001110,$Xl,$Xm
704          pxor           $T1,$Xi
705         pxor            $Xl,$Xm
706         pclmulqdq       \$0x00,$Hkey3,$Xl
707          movdqa         $Xi,$Xhi
708          pshufd         \$0b01001110,$Xi,$T1
709          pxor           $Xi,$T1
710         pclmulqdq       \$0x11,$Hkey3,$Xh
711         pclmulqdq       \$0x00,$HK,$Xm
712         xorps           $Xl,$Xln
713         xorps           $Xh,$Xhn
714
715         lea     0x40($inp),$inp
716         sub     \$0x40,$len
717         jc      .Ltail4x
718
719         jmp     .Lmod4_loop
720 .align  32
721 .Lmod4_loop:
722         pclmulqdq       \$0x00,$Hkey4,$Xi
723         xorps           $Xm,$Xmn
724          movdqu         0x30($inp),$Xl
725          pshufb         $T3,$Xl
726         pclmulqdq       \$0x11,$Hkey4,$Xhi
727         xorps           $Xln,$Xi
728          movdqu         0x20($inp),$Xln
729          movdqa         $Xl,$Xh
730         pclmulqdq       \$0x10,$HK,$T1
731          pshufd         \$0b01001110,$Xl,$Xm
732         xorps           $Xhn,$Xhi
733          pxor           $Xl,$Xm
734          pshufb         $T3,$Xln
735         movups          0x20($Htbl),$HK
736         xorps           $Xmn,$T1
737          pclmulqdq      \$0x00,$Hkey,$Xl
738          pshufd         \$0b01001110,$Xln,$Xmn
739
740         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
741          movdqa         $Xln,$Xhn
742         pxor            $Xhi,$T1                #
743          pxor           $Xln,$Xmn
744         movdqa          $T1,$T2                 #
745          pclmulqdq      \$0x11,$Hkey,$Xh
746         pslldq          \$8,$T1
747         psrldq          \$8,$T2                 #
748         pxor            $T1,$Xi
749         movdqa          .L7_mask(%rip),$T1
750         pxor            $T2,$Xhi                #
751         movq            %rax,$T2
752
753         pand            $Xi,$T1                 # 1st phase
754         pshufb          $T1,$T2                 #
755         pxor            $Xi,$T2                 #
756          pclmulqdq      \$0x00,$HK,$Xm
757         psllq           \$57,$T2                #
758         movdqa          $T2,$T1                 #
759         pslldq          \$8,$T2
760          pclmulqdq      \$0x00,$Hkey2,$Xln
761         psrldq          \$8,$T1                 #       
762         pxor            $T2,$Xi
763         pxor            $T1,$Xhi                #
764         movdqu          0($inp),$T1
765
766         movdqa          $Xi,$T2                 # 2nd phase
767         psrlq           \$1,$Xi
768          pclmulqdq      \$0x11,$Hkey2,$Xhn
769          xorps          $Xl,$Xln
770          movdqu         0x10($inp),$Xl
771          pshufb         $T3,$Xl
772          pclmulqdq      \$0x10,$HK,$Xmn
773          xorps          $Xh,$Xhn
774          movups         0x50($Htbl),$HK
775         pshufb          $T3,$T1
776         pxor            $T2,$Xhi                #
777         pxor            $Xi,$T2
778         psrlq           \$5,$Xi
779
780          movdqa         $Xl,$Xh
781          pxor           $Xm,$Xmn
782          pshufd         \$0b01001110,$Xl,$Xm
783         pxor            $T2,$Xi                 #
784         pxor            $T1,$Xhi
785          pxor           $Xl,$Xm
786          pclmulqdq      \$0x00,$Hkey3,$Xl
787         psrlq           \$1,$Xi                 #
788         pxor            $Xhi,$Xi                #
789         movdqa          $Xi,$Xhi
790          pclmulqdq      \$0x11,$Hkey3,$Xh
791          xorps          $Xl,$Xln
792         pshufd          \$0b01001110,$Xi,$T1
793         pxor            $Xi,$T1
794
795          pclmulqdq      \$0x00,$HK,$Xm
796          xorps          $Xh,$Xhn
797
798         lea     0x40($inp),$inp
799         sub     \$0x40,$len
800         jnc     .Lmod4_loop
801
802 .Ltail4x:
803         pclmulqdq       \$0x00,$Hkey4,$Xi
804         pclmulqdq       \$0x11,$Hkey4,$Xhi
805         pclmulqdq       \$0x10,$HK,$T1
806         xorps           $Xm,$Xmn
807         xorps           $Xln,$Xi
808         xorps           $Xhn,$Xhi
809         pxor            $Xi,$Xhi                # aggregated Karatsuba post-processing
810         pxor            $Xmn,$T1
811
812         pxor            $Xhi,$T1                #
813         pxor            $Xi,$Xhi
814
815         movdqa          $T1,$T2                 #
816         psrldq          \$8,$T1
817         pslldq          \$8,$T2                 #
818         pxor            $T1,$Xhi
819         pxor            $T2,$Xi                 #
820 ___
821         &reduction_alg9($Xhi,$Xi);
822 $code.=<<___;
823         add     \$0x40,$len
824         jz      .Ldone
825         movdqu  0x20($Htbl),$HK
826         sub     \$0x10,$len
827         jz      .Lodd_tail
828 .Lskip4x:
829 ___
830 }
831 $code.=<<___;
832         #######
833         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
834         #       [(H*Ii+1) + (H*Xi+1)] mod P =
835         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
836         #
837         movdqu          ($inp),$T1              # Ii
838         movdqu          16($inp),$Xln           # Ii+1
839         pshufb          $T3,$T1
840         pshufb          $T3,$Xln
841         pxor            $T1,$Xi                 # Ii+Xi
842
843         movdqa          $Xln,$Xhn
844         pshufd          \$0b01001110,$Xln,$Xmn
845         pxor            $Xln,$Xmn
846         pclmulqdq       \$0x00,$Hkey,$Xln
847         pclmulqdq       \$0x11,$Hkey,$Xhn
848         pclmulqdq       \$0x00,$HK,$Xmn
849
850         lea             32($inp),$inp           # i+=2
851         nop
852         sub             \$0x20,$len
853         jbe             .Leven_tail
854         nop
855         jmp             .Lmod_loop
856
857 .align  32
858 .Lmod_loop:
859         movdqa          $Xi,$Xhi
860         movdqa          $Xmn,$T1
861         pshufd          \$0b01001110,$Xi,$Xmn   #
862         pxor            $Xi,$Xmn                #
863
864         pclmulqdq       \$0x00,$Hkey2,$Xi
865         pclmulqdq       \$0x11,$Hkey2,$Xhi
866         pclmulqdq       \$0x10,$HK,$Xmn
867
868         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
869         pxor            $Xhn,$Xhi
870           movdqu        ($inp),$T2              # Ii
871         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
872           pshufb        $T3,$T2
873           movdqu        16($inp),$Xln           # Ii+1
874
875         pxor            $Xhi,$T1
876           pxor          $T2,$Xhi                # "Ii+Xi", consume early
877         pxor            $T1,$Xmn
878          pshufb         $T3,$Xln
879         movdqa          $Xmn,$T1                #
880         psrldq          \$8,$T1
881         pslldq          \$8,$Xmn                #
882         pxor            $T1,$Xhi
883         pxor            $Xmn,$Xi                #
884
885         movdqa          $Xln,$Xhn               #
886
887           movdqa        $Xi,$T2                 # 1st phase
888           movdqa        $Xi,$T1
889           psllq         \$5,$Xi
890           pxor          $Xi,$T1                 #
891         pclmulqdq       \$0x00,$Hkey,$Xln       #######
892           psllq         \$1,$Xi
893           pxor          $T1,$Xi                 #
894           psllq         \$57,$Xi                #
895           movdqa        $Xi,$T1                 #
896           pslldq        \$8,$Xi
897           psrldq        \$8,$T1                 #       
898           pxor          $T2,$Xi
899         pshufd          \$0b01001110,$Xhn,$Xmn
900           pxor          $T1,$Xhi                #
901         pxor            $Xhn,$Xmn               #
902
903           movdqa        $Xi,$T2                 # 2nd phase
904           psrlq         \$1,$Xi
905         pclmulqdq       \$0x11,$Hkey,$Xhn       #######
906           pxor          $T2,$Xhi                #
907           pxor          $Xi,$T2
908           psrlq         \$5,$Xi
909           pxor          $T2,$Xi                 #
910         lea             32($inp),$inp
911           psrlq         \$1,$Xi                 #
912         pclmulqdq       \$0x00,$HK,$Xmn         #######
913           pxor          $Xhi,$Xi                #
914
915         sub             \$0x20,$len
916         ja              .Lmod_loop
917
918 .Leven_tail:
919          movdqa         $Xi,$Xhi
920          movdqa         $Xmn,$T1
921          pshufd         \$0b01001110,$Xi,$Xmn   #
922          pxor           $Xi,$Xmn                #
923
924         pclmulqdq       \$0x00,$Hkey2,$Xi
925         pclmulqdq       \$0x11,$Hkey2,$Xhi
926         pclmulqdq       \$0x10,$HK,$Xmn
927
928         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
929         pxor            $Xhn,$Xhi
930         pxor            $Xi,$T1
931         pxor            $Xhi,$T1
932         pxor            $T1,$Xmn
933         movdqa          $Xmn,$T1                #
934         psrldq          \$8,$T1
935         pslldq          \$8,$Xmn                #
936         pxor            $T1,$Xhi
937         pxor            $Xmn,$Xi                #
938 ___
939         &reduction_alg9 ($Xhi,$Xi);
940 $code.=<<___;
941         test            $len,$len
942         jnz             .Ldone
943
944 .Lodd_tail:
945         movdqu          ($inp),$T1              # Ii
946         pshufb          $T3,$T1
947         pxor            $T1,$Xi                 # Ii+Xi
948 ___
949         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H*(Ii+Xi)
950         &reduction_alg9 ($Xhi,$Xi);
951 $code.=<<___;
952 .Ldone:
953         pshufb          $T3,$Xi
954         movdqu          $Xi,($Xip)
955 ___
956 $code.=<<___ if ($win64);
957         movaps  (%rsp),%xmm6
958         movaps  0x10(%rsp),%xmm7
959         movaps  0x20(%rsp),%xmm8
960         movaps  0x30(%rsp),%xmm9
961         movaps  0x40(%rsp),%xmm10
962         movaps  0x50(%rsp),%xmm11
963         movaps  0x60(%rsp),%xmm12
964         movaps  0x70(%rsp),%xmm13
965         movaps  0x80(%rsp),%xmm14
966         movaps  0x90(%rsp),%xmm15
967         lea     0xa8(%rsp),%rsp
968 .LSEH_end_gcm_ghash_clmul:
969 ___
970 $code.=<<___;
971         ret
972 .size   gcm_ghash_clmul,.-gcm_ghash_clmul
973 ___
974 }
975 \f
976 $code.=<<___;
977 .globl  gcm_init_avx
978 .type   gcm_init_avx,\@abi-omnipotent
979 .align  32
980 gcm_init_avx:
981 ___
982 if ($avx) {
983 my ($Htbl,$Xip)=@_4args;
984 my $HK="%xmm6";
985
986 $code.=<<___ if ($win64);
987 .LSEH_begin_gcm_init_avx:
988         # I can't trust assembler to use specific encoding:-(
989         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
990         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
991 ___
992 $code.=<<___;
993         vzeroupper
994
995         vmovdqu         ($Xip),$Hkey
996         vpshufd         \$0b01001110,$Hkey,$Hkey        # dword swap
997
998         # <<1 twist
999         vpshufd         \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
1000         vpsrlq          \$63,$Hkey,$T1
1001         vpsllq          \$1,$Hkey,$Hkey
1002         vpxor           $T3,$T3,$T3             #
1003         vpcmpgtd        $T2,$T3,$T3             # broadcast carry bit
1004         vpslldq         \$8,$T1,$T1
1005         vpor            $T1,$Hkey,$Hkey         # H<<=1
1006
1007         # magic reduction
1008         vpand           .L0x1c2_polynomial(%rip),$T3,$T3
1009         vpxor           $T3,$Hkey,$Hkey         # if(carry) H^=0x1c2_polynomial
1010
1011         vpunpckhqdq     $Hkey,$Hkey,$HK
1012         vmovdqa         $Hkey,$Xi
1013         vpxor           $Hkey,$HK,$HK
1014         mov             \$4,%r10                # up to H^8
1015         jmp             .Linit_start_avx
1016 ___
1017
1018 sub clmul64x64_avx {
1019 my ($Xhi,$Xi,$Hkey,$HK)=@_;
1020
1021 if (!defined($HK)) {    $HK = $T2;
1022 $code.=<<___;
1023         vpunpckhqdq     $Xi,$Xi,$T1
1024         vpunpckhqdq     $Hkey,$Hkey,$T2
1025         vpxor           $Xi,$T1,$T1             #
1026         vpxor           $Hkey,$T2,$T2
1027 ___
1028 } else {
1029 $code.=<<___;
1030         vpunpckhqdq     $Xi,$Xi,$T1
1031         vpxor           $Xi,$T1,$T1             #
1032 ___
1033 }
1034 $code.=<<___;
1035         vpclmulqdq      \$0x11,$Hkey,$Xi,$Xhi   #######
1036         vpclmulqdq      \$0x00,$Hkey,$Xi,$Xi    #######
1037         vpclmulqdq      \$0x00,$HK,$T1,$T1      #######
1038         vpxor           $Xi,$Xhi,$T2            #
1039         vpxor           $T2,$T1,$T1             #
1040
1041         vpslldq         \$8,$T1,$T2             #
1042         vpsrldq         \$8,$T1,$T1
1043         vpxor           $T2,$Xi,$Xi             #
1044         vpxor           $T1,$Xhi,$Xhi
1045 ___
1046 }
1047
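# reduction_avx performs the same two-phase shift-and-XOR fold as
# reduction_alg9 above, recoded with three-operand AVX instructions;
# gcm_init_avx uses it while computing the powers of H.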
1048 sub reduction_avx {
1049 my ($Xhi,$Xi) = @_;
1050
1051 $code.=<<___;
1052         vpsllq          \$57,$Xi,$T1            # 1st phase
1053         vpsllq          \$62,$Xi,$T2
1054         vpxor           $T1,$T2,$T2             #
1055         vpsllq          \$63,$Xi,$T1
1056         vpxor           $T1,$T2,$T2             #
1057         vpslldq         \$8,$T2,$T1             #
1058         vpsrldq         \$8,$T2,$T2
1059         vpxor           $T1,$Xi,$Xi             #
1060         vpxor           $T2,$Xhi,$Xhi
1061
1062         vpsrlq          \$1,$Xi,$T2             # 2nd phase
1063         vpxor           $Xi,$Xhi,$Xhi
1064         vpxor           $T2,$Xi,$Xi             #
1065         vpsrlq          \$5,$T2,$T2
1066         vpxor           $T2,$Xi,$Xi             #
1067         vpsrlq          \$1,$Xi,$Xi             #
1068         vpxor           $Xhi,$Xi,$Xi            #
1069 ___
1070 }
1071
1072 $code.=<<___;
1073 .align  32
1074 .Linit_loop_avx:
1075         vpalignr        \$8,$T1,$T2,$T3         # low part is H.lo^H.hi...
1076         vmovdqu         $T3,-0x10($Htbl)        # save Karatsuba "salt"
1077 ___
1078         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^3,5,7
1079         &reduction_avx  ($Xhi,$Xi);
1080 $code.=<<___;
1081 .Linit_start_avx:
1082         vmovdqa         $Xi,$T3
1083 ___
1084         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^2,4,6,8
1085         &reduction_avx  ($Xhi,$Xi);
1086 $code.=<<___;
1087         vpshufd         \$0b01001110,$T3,$T1
1088         vpshufd         \$0b01001110,$Xi,$T2
1089         vpxor           $T3,$T1,$T1             # Karatsuba pre-processing
1090         vmovdqu         $T3,0x00($Htbl)         # save H^1,3,5,7
1091         vpxor           $Xi,$T2,$T2             # Karatsuba pre-processing
1092         vmovdqu         $Xi,0x10($Htbl)         # save H^2,4,6,8
1093         lea             0x30($Htbl),$Htbl
1094         sub             \$1,%r10
1095         jnz             .Linit_loop_avx
1096
1097         vpalignr        \$8,$T2,$T1,$T3         # last "salt" is flipped
1098         vmovdqu         $T3,-0x10($Htbl)
1099
1100         vzeroupper
1101 ___
1102 $code.=<<___ if ($win64);
1103         movaps  (%rsp),%xmm6
1104         lea     0x18(%rsp),%rsp
1105 .LSEH_end_gcm_init_avx:
1106 ___
1107 $code.=<<___;
1108         ret
1109 .size   gcm_init_avx,.-gcm_init_avx
1110 ___
1111 } else {
1112 $code.=<<___;
1113         jmp     .L_init_clmul
1114 .size   gcm_init_avx,.-gcm_init_avx
1115 ___
1116 }
1117
1118 $code.=<<___;
1119 .globl  gcm_gmult_avx
1120 .type   gcm_gmult_avx,\@abi-omnipotent
1121 .align  32
1122 gcm_gmult_avx:
1123         jmp     .L_gmult_clmul
1124 .size   gcm_gmult_avx,.-gcm_gmult_avx
1125 ___
1126 \f
1127 $code.=<<___;
1128 .globl  gcm_ghash_avx
1129 .type   gcm_ghash_avx,\@abi-omnipotent
1130 .align  32
1131 gcm_ghash_avx:
1132 ___
1133 if ($avx) {
1134 my ($Xip,$Htbl,$inp,$len)=@_4args;
1135 my ($Xlo,$Xhi,$Xmi,
1136     $Zlo,$Zhi,$Zmi,
1137     $Hkey,$HK,$T1,$T2,
1138     $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1139
1140 $code.=<<___ if ($win64);
1141         lea     -0x88(%rsp),%rax
1142 .LSEH_begin_gcm_ghash_avx:
1143         # I can't trust assembler to use specific encoding:-(
1144         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1145         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1146         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1147         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1148         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1149         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1150         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1151         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1152         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1153         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1154         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1155 ___
1156 $code.=<<___;
1157         vzeroupper
1158
1159         vmovdqu         ($Xip),$Xi              # load $Xi
1160         lea             .L0x1c2_polynomial(%rip),%r10
1161         lea             0x40($Htbl),$Htbl       # size optimization
1162         vmovdqu         .Lbswap_mask(%rip),$bswap
1163         vpshufb         $bswap,$Xi,$Xi
1164         cmp             \$0x80,$len
1165         jb              .Lshort_avx
1166         sub             \$0x80,$len
1167
1168         vmovdqu         0x70($inp),$Ii          # I[7]
1169         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1170         vpshufb         $bswap,$Ii,$Ii
1171         vmovdqu         0x20-0x40($Htbl),$HK
1172
1173         vpunpckhqdq     $Ii,$Ii,$T2
1174          vmovdqu        0x60($inp),$Ij          # I[6]
1175         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1176         vpxor           $Ii,$T2,$T2
1177          vpshufb        $bswap,$Ij,$Ij
1178         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1179          vmovdqu        0x10-0x40($Htbl),$Hkey  # $Hkey^2
1180          vpunpckhqdq    $Ij,$Ij,$T1
1181          vmovdqu        0x50($inp),$Ii          # I[5]
1182         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1183          vpxor          $Ij,$T1,$T1
1184
1185          vpshufb        $bswap,$Ii,$Ii
1186         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1187          vpunpckhqdq    $Ii,$Ii,$T2
1188         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1189          vmovdqu        0x30-0x40($Htbl),$Hkey  # $Hkey^3
1190          vpxor          $Ii,$T2,$T2
1191          vmovdqu        0x40($inp),$Ij          # I[4]
1192         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1193          vmovdqu        0x50-0x40($Htbl),$HK
1194
1195          vpshufb        $bswap,$Ij,$Ij
1196         vpxor           $Xlo,$Zlo,$Zlo
1197         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1198         vpxor           $Xhi,$Zhi,$Zhi
1199          vpunpckhqdq    $Ij,$Ij,$T1
1200         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1201          vmovdqu        0x40-0x40($Htbl),$Hkey  # $Hkey^4
1202         vpxor           $Xmi,$Zmi,$Zmi
1203         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1204          vpxor          $Ij,$T1,$T1
1205
1206          vmovdqu        0x30($inp),$Ii          # I[3]
1207         vpxor           $Zlo,$Xlo,$Xlo
1208         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1209         vpxor           $Zhi,$Xhi,$Xhi
1210          vpshufb        $bswap,$Ii,$Ii
1211         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1212          vmovdqu        0x60-0x40($Htbl),$Hkey  # $Hkey^5
1213         vpxor           $Zmi,$Xmi,$Xmi
1214          vpunpckhqdq    $Ii,$Ii,$T2
1215         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1216          vmovdqu        0x80-0x40($Htbl),$HK
1217          vpxor          $Ii,$T2,$T2
1218
1219          vmovdqu        0x20($inp),$Ij          # I[2]
1220         vpxor           $Xlo,$Zlo,$Zlo
1221         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1222         vpxor           $Xhi,$Zhi,$Zhi
1223          vpshufb        $bswap,$Ij,$Ij
1224         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1225          vmovdqu        0x70-0x40($Htbl),$Hkey  # $Hkey^6
1226         vpxor           $Xmi,$Zmi,$Zmi
1227          vpunpckhqdq    $Ij,$Ij,$T1
1228         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1229          vpxor          $Ij,$T1,$T1
1230
1231          vmovdqu        0x10($inp),$Ii          # I[1]
1232         vpxor           $Zlo,$Xlo,$Xlo
1233         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1234         vpxor           $Zhi,$Xhi,$Xhi
1235          vpshufb        $bswap,$Ii,$Ii
1236         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1237          vmovdqu        0x90-0x40($Htbl),$Hkey  # $Hkey^7
1238         vpxor           $Zmi,$Xmi,$Xmi
1239          vpunpckhqdq    $Ii,$Ii,$T2
1240         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1241          vmovdqu        0xb0-0x40($Htbl),$HK
1242          vpxor          $Ii,$T2,$T2
1243
1244          vmovdqu        ($inp),$Ij              # I[0]
1245         vpxor           $Xlo,$Zlo,$Zlo
1246         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1247         vpxor           $Xhi,$Zhi,$Zhi
1248          vpshufb        $bswap,$Ij,$Ij
1249         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1250          vmovdqu        0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1251         vpxor           $Xmi,$Zmi,$Zmi
1252         vpclmulqdq      \$0x10,$HK,$T2,$Xmi
1253
1254         lea             0x80($inp),$inp
1255         cmp             \$0x80,$len
1256         jb              .Ltail_avx
1257
1258         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1259         sub             \$0x80,$len
1260         jmp             .Loop8x_avx
1261
1262 .align  32
1263 .Loop8x_avx:
1264         vpunpckhqdq     $Ij,$Ij,$T1
1265          vmovdqu        0x70($inp),$Ii          # I[7]
1266         vpxor           $Xlo,$Zlo,$Zlo
1267         vpxor           $Ij,$T1,$T1
1268         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xi
1269          vpshufb        $bswap,$Ii,$Ii
1270         vpxor           $Xhi,$Zhi,$Zhi
1271         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xo
1272          vmovdqu        0x00-0x40($Htbl),$Hkey  # $Hkey^1
1273          vpunpckhqdq    $Ii,$Ii,$T2
1274         vpxor           $Xmi,$Zmi,$Zmi
1275         vpclmulqdq      \$0x00,$HK,$T1,$Tred
1276          vmovdqu        0x20-0x40($Htbl),$HK
1277          vpxor          $Ii,$T2,$T2
1278
1279           vmovdqu       0x60($inp),$Ij          # I[6]
1280          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1281         vpxor           $Zlo,$Xi,$Xi            # collect result
1282           vpshufb       $bswap,$Ij,$Ij
1283          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1284         vxorps          $Zhi,$Xo,$Xo
1285           vmovdqu       0x10-0x40($Htbl),$Hkey  # $Hkey^2
1286          vpunpckhqdq    $Ij,$Ij,$T1
1287          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1288         vpxor           $Zmi,$Tred,$Tred
1289          vxorps         $Ij,$T1,$T1
1290
1291           vmovdqu       0x50($inp),$Ii          # I[5]
1292         vpxor           $Xi,$Tred,$Tred         # aggregated Karatsuba post-processing
1293          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1294         vpxor           $Xo,$Tred,$Tred
1295         vpslldq         \$8,$Tred,$T2
1296          vpxor          $Xlo,$Zlo,$Zlo
1297          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1298         vpsrldq         \$8,$Tred,$Tred
1299         vpxor           $T2, $Xi, $Xi
1300           vmovdqu       0x30-0x40($Htbl),$Hkey  # $Hkey^3
1301           vpshufb       $bswap,$Ii,$Ii
1302         vxorps          $Tred,$Xo, $Xo
1303          vpxor          $Xhi,$Zhi,$Zhi
1304          vpunpckhqdq    $Ii,$Ii,$T2
1305          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1306           vmovdqu       0x50-0x40($Htbl),$HK
1307          vpxor          $Ii,$T2,$T2
1308          vpxor          $Xmi,$Zmi,$Zmi
1309
1310           vmovdqu       0x40($inp),$Ij          # I[4]
1311         vpalignr        \$8,$Xi,$Xi,$Tred       # 1st phase
1312          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1313           vpshufb       $bswap,$Ij,$Ij
1314          vpxor          $Zlo,$Xlo,$Xlo
1315          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1316           vmovdqu       0x40-0x40($Htbl),$Hkey  # $Hkey^4
1317          vpunpckhqdq    $Ij,$Ij,$T1
1318          vpxor          $Zhi,$Xhi,$Xhi
1319          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1320          vxorps         $Ij,$T1,$T1
1321          vpxor          $Zmi,$Xmi,$Xmi
1322
1323           vmovdqu       0x30($inp),$Ii          # I[3]
1324         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1325          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1326           vpshufb       $bswap,$Ii,$Ii
1327          vpxor          $Xlo,$Zlo,$Zlo
1328          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1329           vmovdqu       0x60-0x40($Htbl),$Hkey  # $Hkey^5
1330          vpunpckhqdq    $Ii,$Ii,$T2
1331          vpxor          $Xhi,$Zhi,$Zhi
1332          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1333           vmovdqu       0x80-0x40($Htbl),$HK
1334          vpxor          $Ii,$T2,$T2
1335          vpxor          $Xmi,$Zmi,$Zmi
1336
1337           vmovdqu       0x20($inp),$Ij          # I[2]
1338          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1339           vpshufb       $bswap,$Ij,$Ij
1340          vpxor          $Zlo,$Xlo,$Xlo
1341          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1342           vmovdqu       0x70-0x40($Htbl),$Hkey  # $Hkey^6
1343          vpunpckhqdq    $Ij,$Ij,$T1
1344          vpxor          $Zhi,$Xhi,$Xhi
1345          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1346          vpxor          $Ij,$T1,$T1
1347          vpxor          $Zmi,$Xmi,$Xmi
1348         vxorps          $Tred,$Xi,$Xi
1349
1350           vmovdqu       0x10($inp),$Ii          # I[1]
1351         vpalignr        \$8,$Xi,$Xi,$Tred       # 2nd phase
1352          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1353           vpshufb       $bswap,$Ii,$Ii
1354          vpxor          $Xlo,$Zlo,$Zlo
1355          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1356           vmovdqu       0x90-0x40($Htbl),$Hkey  # $Hkey^7
1357         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1358         vxorps          $Xo,$Tred,$Tred
1359          vpunpckhqdq    $Ii,$Ii,$T2
1360          vpxor          $Xhi,$Zhi,$Zhi
1361          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1362           vmovdqu       0xb0-0x40($Htbl),$HK
1363          vpxor          $Ii,$T2,$T2
1364          vpxor          $Xmi,$Zmi,$Zmi
1365
1366           vmovdqu       ($inp),$Ij              # I[0]
1367          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1368           vpshufb       $bswap,$Ij,$Ij
1369          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1370           vmovdqu       0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1371         vpxor           $Tred,$Ij,$Ij
1372          vpclmulqdq     \$0x10,$HK,  $T2,$Xmi
1373         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1374
1375         lea             0x80($inp),$inp
1376         sub             \$0x80,$len
1377         jnc             .Loop8x_avx
1378
1379         add             \$0x80,$len
1380         jmp             .Ltail_no_xor_avx
1381
1382 .align  32
1383 .Lshort_avx:
1384         vmovdqu         -0x10($inp,$len),$Ii    # very last word
1385         lea             ($inp,$len),$inp
1386         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1387         vmovdqu         0x20-0x40($Htbl),$HK
1388         vpshufb         $bswap,$Ii,$Ij
1389
1390         vmovdqa         $Xlo,$Zlo               # subtle way to zero $Zlo,
1391         vmovdqa         $Xhi,$Zhi               # $Zhi and
1392         vmovdqa         $Xmi,$Zmi               # $Zmi
1393         sub             \$0x10,$len
1394         jz              .Ltail_avx
1395
1396         vpunpckhqdq     $Ij,$Ij,$T1
1397         vpxor           $Xlo,$Zlo,$Zlo
1398         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1399         vpxor           $Ij,$T1,$T1
1400          vmovdqu        -0x20($inp),$Ii
1401         vpxor           $Xhi,$Zhi,$Zhi
1402         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1403         vmovdqu         0x10-0x40($Htbl),$Hkey  # $Hkey^2
1404          vpshufb        $bswap,$Ii,$Ij
1405         vpxor           $Xmi,$Zmi,$Zmi
1406         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1407         vpsrldq         \$8,$HK,$HK
1408         sub             \$0x10,$len
1409         jz              .Ltail_avx
1410
1411         vpunpckhqdq     $Ij,$Ij,$T1
1412         vpxor           $Xlo,$Zlo,$Zlo
1413         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1414         vpxor           $Ij,$T1,$T1
1415          vmovdqu        -0x30($inp),$Ii
1416         vpxor           $Xhi,$Zhi,$Zhi
1417         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1418         vmovdqu         0x30-0x40($Htbl),$Hkey  # $Hkey^3
1419          vpshufb        $bswap,$Ii,$Ij
1420         vpxor           $Xmi,$Zmi,$Zmi
1421         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1422         vmovdqu         0x50-0x40($Htbl),$HK
1423         sub             \$0x10,$len
1424         jz              .Ltail_avx
1425
1426         vpunpckhqdq     $Ij,$Ij,$T1
1427         vpxor           $Xlo,$Zlo,$Zlo
1428         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1429         vpxor           $Ij,$T1,$T1
1430          vmovdqu        -0x40($inp),$Ii
1431         vpxor           $Xhi,$Zhi,$Zhi
1432         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1433         vmovdqu         0x40-0x40($Htbl),$Hkey  # $Hkey^4
1434          vpshufb        $bswap,$Ii,$Ij
1435         vpxor           $Xmi,$Zmi,$Zmi
1436         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1437         vpsrldq         \$8,$HK,$HK
1438         sub             \$0x10,$len
1439         jz              .Ltail_avx
1440
1441         vpunpckhqdq     $Ij,$Ij,$T1
1442         vpxor           $Xlo,$Zlo,$Zlo
1443         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1444         vpxor           $Ij,$T1,$T1
1445          vmovdqu        -0x50($inp),$Ii
1446         vpxor           $Xhi,$Zhi,$Zhi
1447         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1448         vmovdqu         0x60-0x40($Htbl),$Hkey  # $Hkey^5
1449          vpshufb        $bswap,$Ii,$Ij
1450         vpxor           $Xmi,$Zmi,$Zmi
1451         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1452         vmovdqu         0x80-0x40($Htbl),$HK
1453         sub             \$0x10,$len
1454         jz              .Ltail_avx
1455
1456         vpunpckhqdq     $Ij,$Ij,$T1
1457         vpxor           $Xlo,$Zlo,$Zlo
1458         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1459         vpxor           $Ij,$T1,$T1
1460          vmovdqu        -0x60($inp),$Ii
1461         vpxor           $Xhi,$Zhi,$Zhi
1462         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1463         vmovdqu         0x70-0x40($Htbl),$Hkey  # $Hkey^6
1464          vpshufb        $bswap,$Ii,$Ij
1465         vpxor           $Xmi,$Zmi,$Zmi
1466         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1467         vpsrldq         \$8,$HK,$HK
1468         sub             \$0x10,$len
1469         jz              .Ltail_avx
1470
1471         vpunpckhqdq     $Ij,$Ij,$T1
1472         vpxor           $Xlo,$Zlo,$Zlo
1473         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1474         vpxor           $Ij,$T1,$T1
1475          vmovdqu        -0x70($inp),$Ii
1476         vpxor           $Xhi,$Zhi,$Zhi
1477         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1478         vmovdqu         0x90-0x40($Htbl),$Hkey  # $Hkey^7
1479          vpshufb        $bswap,$Ii,$Ij
1480         vpxor           $Xmi,$Zmi,$Zmi
1481         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1482         vmovq           0xb8-0x40($Htbl),$HK
1483         sub             \$0x10,$len
1484         jmp             .Ltail_avx
1485
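# .Ltail_avx folds the current hash value $Xi into the pending block and
# multiplies it by whatever power of H is left in $Hkey; .Ltail_no_xor_avx
# skips that fold for entry points that have already accumulated $Xi.
# Everything below then recombines the Karatsuba partials and performs the
# one deferred reduction.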
1486 .align  32
1487 .Ltail_avx:
1488         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1489 .Ltail_no_xor_avx:
1490         vpunpckhqdq     $Ij,$Ij,$T1
1491         vpxor           $Xlo,$Zlo,$Zlo
1492         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1493         vpxor           $Ij,$T1,$T1
1494         vpxor           $Xhi,$Zhi,$Zhi
1495         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1496         vpxor           $Xmi,$Zmi,$Zmi
1497         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1498
1499         vmovdqu         (%r10),$Tred
1500
1501         vpxor           $Xlo,$Zlo,$Xi
1502         vpxor           $Xhi,$Zhi,$Xo
1503         vpxor           $Xmi,$Zmi,$Zmi
1504
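# Aggregated Karatsuba post-processing: $Zmi holds the sum of all
# (xlo^xhi)*(Hlo^Hhi) terms, so XORing in $Xi (summed low products) and $Xo
# (summed high products) recovers the true middle term, mid = Zmi ^ lo ^ hi.
# The middle term is then split with vpslldq/vpsrldq and folded into the
# 256-bit product, i.e. [Xo:Xi] ^= mid << 64.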
1505         vpxor           $Xi, $Zmi,$Zmi          # aggregated Karatsuba post-processing
1506         vpxor           $Xo, $Zmi,$Zmi
1507         vpslldq         \$8, $Zmi,$T2
1508         vpsrldq         \$8, $Zmi,$Zmi
1509         vpxor           $T2, $Xi, $Xi
1510         vpxor           $Zmi,$Xo, $Xo
1511
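# Two-phase reduction: $Tred (loaded above from (%r10)) must hold the
# bit-reflected reduction constant, cf. .L0x1c2_polynomial. Each phase
# carry-less multiplies the low 64 bits of $Xi by the 0xc2... half of that
# constant and swaps the halves of $Xi; after the second phase the high half
# $Xo is XORed in, reducing the 256-bit result to 128 bits modulo the GCM
# polynomial x^128 + x^7 + x^2 + x + 1.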
1512         vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 1st phase
1513         vpalignr        \$8,$Xi,$Xi,$Xi
1514         vpxor           $T2,$Xi,$Xi
1515
1516         vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 2nd phase
1517         vpalignr        \$8,$Xi,$Xi,$Xi
1518         vpxor           $Xo,$Xi,$Xi
1519         vpxor           $T2,$Xi,$Xi
1520
1521         cmp             \$0,$len
1522         jne             .Lshort_avx
1523
1524         vpshufb         $bswap,$Xi,$Xi
1525         vmovdqu         $Xi,($Xip)
1526         vzeroupper
1527 ___
1528 $code.=<<___ if ($win64);
1529         movaps  (%rsp),%xmm6
1530         movaps  0x10(%rsp),%xmm7
1531         movaps  0x20(%rsp),%xmm8
1532         movaps  0x30(%rsp),%xmm9
1533         movaps  0x40(%rsp),%xmm10
1534         movaps  0x50(%rsp),%xmm11
1535         movaps  0x60(%rsp),%xmm12
1536         movaps  0x70(%rsp),%xmm13
1537         movaps  0x80(%rsp),%xmm14
1538         movaps  0x90(%rsp),%xmm15
1539         lea     0xa8(%rsp),%rsp
1540 .LSEH_end_gcm_ghash_avx:
1541 ___
1542 $code.=<<___;
1543         ret
1544 .size   gcm_ghash_avx,.-gcm_ghash_avx
1545 ___
1546 } else {
1547 $code.=<<___;
1548         jmp     .L_ghash_clmul
1549 .size   gcm_ghash_avx,.-gcm_ghash_avx
1550 ___
1551 }
1552 \f
1553 $code.=<<___;
1554 .align  64
1555 .Lbswap_mask:
1556         .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
1557 .L0x1c2_polynomial:
1558         .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
1559 .L7_mask:
1560         .long   7,0,7,0
1561 .L7_mask_poly:
1562         .long   7,0,`0xE1<<1`,0
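# .Lbswap_mask feeds vpshufb/pshufb to reverse byte order (GHASH blocks are
# big-endian). .L0x1c2_polynomial is the GCM modulus x^128+x^7+x^2+x+1 in the
# bit-reflected form used by the carry-less-multiply paths (0x01 in the low
# byte, 0xc2 in the top byte). .L7_mask and .L7_mask_poly pack the value 7
# and the 16-bit reduction constant 0xE1<<1 = 0x1C2 into 64-bit lanes.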
1563 .align  64
1564 .type   .Lrem_4bit,\@object
1565 .Lrem_4bit:
1566         .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
1567         .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
1568         .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
1569         .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
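# Each 16-bit constant above (0x1C20, 0x3840, ...) is the carry-less product
# of its 4-bit index with 0x1C2, shifted left by 4: it is the reduction term
# folded in when the 4-bit table-driven GHASH shifts its accumulator right by
# a nibble. The constants sit pre-shifted in the top 16 bits of each 64-bit
# slot, hence the extra <<16 in the high dword.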
1570 .type   .Lrem_8bit,\@object
1571 .Lrem_8bit:
1572         .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
1573         .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
1574         .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
1575         .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
1576         .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
1577         .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
1578         .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
1579         .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
1580         .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
1581         .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
1582         .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
1583         .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
1584         .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
1585         .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
1586         .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
1587         .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
1588         .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
1589         .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
1590         .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
1591         .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
1592         .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
1593         .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
1594         .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
1595         .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
1596         .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
1597         .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
1598         .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
1599         .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
1600         .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
1601         .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
1602         .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
1603         .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
1604
1605 .asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1606 .align  64
1607 ___
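# The .Lrem_8bit entries emitted above follow the same rule without the extra
# shift: entry n is the carry-less (GF(2)) product of n and 0x1C2. A small
# commented-out Perl sketch that reproduces both tables (illustrative only,
# not used when generating the code):
#
#   my @rem_8bit;
#   for my $n (0..255) {
#       my $r = 0;
#       for my $bit (0..7) { $r ^= 0x1C2 << $bit if $n >> $bit & 1; }
#       push @rem_8bit, $r;                     # matches the .value lines
#   }
#   my @rem_4bit = map { $_ << 4 } @rem_8bit[0..15];   # matches .Lrem_4bit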
1608 \f
1609 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1610 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
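# se_handler backs the .pdata entries for the stack-frame-based 4-bit
# routines. Given the faulting RIP it compares against HandlerData[0]/[1]
# (the prologue and epilogue labels) to decide whether the frame is live; if
# so it recovers rbx/rbp/r12 from the stack and patches Rsp/Rsi/Rdi back into
# the CONTEXT, then forwards the copied context to RtlVirtualUnwind and
# returns ExceptionContinueSearch so the OS continues unwinding.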
1611 if ($win64) {
1612 $rec="%rcx";
1613 $frame="%rdx";
1614 $context="%r8";
1615 $disp="%r9";
1616
1617 $code.=<<___;
1618 .extern __imp_RtlVirtualUnwind
1619 .type   se_handler,\@abi-omnipotent
1620 .align  16
1621 se_handler:
1622         push    %rsi
1623         push    %rdi
1624         push    %rbx
1625         push    %rbp
1626         push    %r12
1627         push    %r13
1628         push    %r14
1629         push    %r15
1630         pushfq
1631         sub     \$64,%rsp
1632
1633         mov     120($context),%rax      # pull context->Rax
1634         mov     248($context),%rbx      # pull context->Rip
1635
1636         mov     8($disp),%rsi           # disp->ImageBase
1637         mov     56($disp),%r11          # disp->HandlerData
1638
1639         mov     0(%r11),%r10d           # HandlerData[0]
1640         lea     (%rsi,%r10),%r10        # prologue label
1641         cmp     %r10,%rbx               # context->Rip<prologue label
1642         jb      .Lin_prologue
1643
1644         mov     152($context),%rax      # pull context->Rsp
1645
1646         mov     4(%r11),%r10d           # HandlerData[1]
1647         lea     (%rsi,%r10),%r10        # epilogue label
1648         cmp     %r10,%rbx               # context->Rip>=epilogue label
1649         jae     .Lin_prologue
1650
1651         lea     24(%rax),%rax           # adjust "rsp"
1652
1653         mov     -8(%rax),%rbx
1654         mov     -16(%rax),%rbp
1655         mov     -24(%rax),%r12
1656         mov     %rbx,144($context)      # restore context->Rbx
1657         mov     %rbp,160($context)      # restore context->Rbp
1658         mov     %r12,216($context)      # restore context->R12
1659
1660 .Lin_prologue:
1661         mov     8(%rax),%rdi
1662         mov     16(%rax),%rsi
1663         mov     %rax,152($context)      # restore context->Rsp
1664         mov     %rsi,168($context)      # restore context->Rsi
1665         mov     %rdi,176($context)      # restore context->Rdi
1666
1667         mov     40($disp),%rdi          # disp->ContextRecord
1668         mov     $context,%rsi           # context
1669         mov     \$`1232/8`,%ecx         # sizeof(CONTEXT) in qwords, for rep movsq
1670         .long   0xa548f3fc              # cld; rep movsq
1671
1672         mov     $disp,%rsi
1673         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1674         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1675         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1676         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1677         mov     40(%rsi),%r10           # disp->ContextRecord
1678         lea     56(%rsi),%r11           # &disp->HandlerData
1679         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1680         mov     %r10,32(%rsp)           # arg5
1681         mov     %r11,40(%rsp)           # arg6
1682         mov     %r12,48(%rsp)           # arg7
1683         mov     %rcx,56(%rsp)           # arg8, (NULL)
1684         call    *__imp_RtlVirtualUnwind(%rip)
1685
1686         mov     \$1,%eax                # ExceptionContinueSearch
1687         add     \$64,%rsp
1688         popfq
1689         pop     %r15
1690         pop     %r14
1691         pop     %r13
1692         pop     %r12
1693         pop     %rbp
1694         pop     %rbx
1695         pop     %rdi
1696         pop     %rsi
1697         ret
1698 .size   se_handler,.-se_handler
1699
1700 .section        .pdata
1701 .align  4
1702         .rva    .LSEH_begin_gcm_gmult_4bit
1703         .rva    .LSEH_end_gcm_gmult_4bit
1704         .rva    .LSEH_info_gcm_gmult_4bit
1705
1706         .rva    .LSEH_begin_gcm_ghash_4bit
1707         .rva    .LSEH_end_gcm_ghash_4bit
1708         .rva    .LSEH_info_gcm_ghash_4bit
1709
1710         .rva    .LSEH_begin_gcm_init_clmul
1711         .rva    .LSEH_end_gcm_init_clmul
1712         .rva    .LSEH_info_gcm_init_clmul
1713
1714         .rva    .LSEH_begin_gcm_ghash_clmul
1715         .rva    .LSEH_end_gcm_ghash_clmul
1716         .rva    .LSEH_info_gcm_ghash_clmul
1717 ___
1718 $code.=<<___    if ($avx);
1719         .rva    .LSEH_begin_gcm_init_avx
1720         .rva    .LSEH_end_gcm_init_avx
1721         .rva    .LSEH_info_gcm_init_clmul
1722
1723         .rva    .LSEH_begin_gcm_ghash_avx
1724         .rva    .LSEH_end_gcm_ghash_avx
1725         .rva    .LSEH_info_gcm_ghash_clmul
1726 ___
1727 $code.=<<___;
1728 .section        .xdata
1729 .align  8
1730 .LSEH_info_gcm_gmult_4bit:
1731         .byte   9,0,0,0
1732         .rva    se_handler
1733         .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
1734 .LSEH_info_gcm_ghash_4bit:
1735         .byte   9,0,0,0
1736         .rva    se_handler
1737         .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
1738 .LSEH_info_gcm_init_clmul:
1739         .byte   0x01,0x08,0x03,0x00
1740         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1741         .byte   0x04,0x22,0x00,0x00     #sub    rsp,0x18
1742 .LSEH_info_gcm_ghash_clmul:
1743         .byte   0x01,0x33,0x16,0x00
1744         .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
1745         .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
1746         .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
1747         .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
1748         .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
1749         .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
1750         .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
1751         .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
1752         .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
1753         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1754         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
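# The two .LSEH_info_*_clmul blocks above are raw UNWIND_INFO: byte 0 is
# version/flags, byte 1 the prologue size, byte 2 the number of unwind-code
# slots, byte 3 the frame register (none here). Each entry that follows is an
# UNWIND_CODE pairing a prologue offset with an opcode, e.g. 0x33,0xf8,0x09,0x00
# decodes as "at offset 0x33, xmm15 was saved at rsp+0x90", and
# 0x04,0x01,0x15,0x00 as the 0xa8-byte stack allocation. Since these prologues
# only allocate stack and spill xmm registers, no language handler is needed,
# unlike the 4-bit routines that use se_handler.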
1755 ___
1756 }
1757 \f
1758 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
1759
1760 print $code;
1761
1762 close STDOUT;