# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# This module implements support for ARMv8 AES instructions. The
# module is endian-agnostic in the sense that it supports both big-
# and little-endian cases, and it likewise supports both 32- and
# 64-bit modes of operation. The latter is achieved by limiting the
# number of utilized registers to 16, which implies additional NEON
# load and integer instructions. This has no effect on the mighty
# Apple A7, where results are literally equal to the theoretical
# estimates based on AES instruction latencies and issue rates. On
# Cortex-A53, an in-order execution core, this costs up to 10-15%,
# which is partially compensated by a dedicated code path for the
# 128-bit CBC encrypt case. On Cortex-A57, parallelizable mode
# performance seems to be limited by the sheer amount of NEON
# instructions...
# Performance in cycles per byte processed with 128-bit key:
#
#                CBC enc        CBC dec        CTR
# Apple A7       2.39           1.20           1.20
# Cortex-A53     1.32           1.29           1.46
# Cortex-A57(*)  1.95           0.85           0.93
# Denver         1.96           0.86           0.80
# Mongoose       1.33           1.20           1.20
#
# (*)  original 3.64/1.34/1.32 results were for r0p0 revision
#      and are still the same even for the updated module;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour $output";
#if __ARM_MAX_ARCH__>=7
$code.=".arch armv8-a+crypto\n" if ($flavour =~ /64/);
$code.=".arch armv7-a\n.fpu neon\n.code 32\n" if ($flavour !~ /64/);
#^^^^^^ this is done to simplify adoption by not depending
# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
# NEON is mostly 32-bit mnemonics, integer - mostly 64-bit. The goal is
# to maintain both 32- and 64-bit codes within a single module and
# transliterate common code to either flavour with regex voodoo.
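# For illustration (an approximate trace of the substitutions at the
# bottom of this file, not original commentary): a shared line such as
# "vld1.32 {q8},[x7],#16" should come out as roughly
# "ld1 {v16.4s},[x7],#16" on the 64-bit path, while the 32-bit path
# keeps the legacy NEON form with x7 renamed to r7.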
my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
$flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
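# The 32-bit flavour presumably picks q0-q3 and q8-q10 here to stay
# clear of q4-q7 (d8-d15), which are callee-saved under the 32-bit
# procedure call standard and would otherwise need to be spilled.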
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.globl ${prefix}_set_encrypt_key
.type ${prefix}_set_encrypt_key,%function
${prefix}_set_encrypt_key:
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
veor $zero,$zero,$zero
vld1.8 {$in0},[$inp],#16
mov $bits,#8 // reuse $bits
vld1.32 {$rcon,$mask},[$ptr],#32
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
vshl.u8 $rcon,$rcon,#1
vld1.32 {$rcon},[$ptr]
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
vshl.u8 $rcon,$rcon,#1
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
vst1.32 {$in0},[$out]
vld1.8 {$in1},[$inp],#8
vmov.i8 $key,#8 // borrow $key
vst1.32 {$in0},[$out],#16
vsub.i8 $mask,$mask,$key // adjust the mask
vtbl.8 $key,{$in1},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in1},[$out],#8
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
vdup.32 $tmp,${in0}[3]
vext.8 $in1,$zero,$in1,#12
vshl.u8 $rcon,$rcon,#1
vst1.32 {$in0},[$out],#16
vst1.32 {$in0},[$out],#16
vtbl.8 $key,{$in1},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in1},[$out],#16
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
vshl.u8 $rcon,$rcon,#1
vst1.32 {$in0},[$out],#16
vdup.32 $key,${in0}[3] // just splat
vext.8 $tmp,$zero,$in1,#12
vext.8 $tmp,$zero,$tmp,#12
vext.8 $tmp,$zero,$tmp,#12
mov x0,$ptr // return value
`"ldr x29,[sp],#16" if ($flavour =~ /64/)`
.size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
.globl ${prefix}_set_decrypt_key
.type ${prefix}_set_decrypt_key,%function
${prefix}_set_decrypt_key:
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
$code.=<<___ if ($flavour !~ /64/);
sub $out,$out,#240 // restore original $out
add $inp,$out,x12,lsl#4 // end of key schedule
vld1.32 {v0.16b},[$out]
vld1.32 {v1.16b},[$inp]
vst1.32 {v0.16b},[$inp],x4
vst1.32 {v1.16b},[$out],#16
vld1.32 {v0.16b},[$out]
vld1.32 {v1.16b},[$inp]
vst1.32 {v0.16b},[$inp],x4
vst1.32 {v1.16b},[$out],#16
vld1.32 {v0.16b},[$out]
vst1.32 {v0.16b},[$inp]
eor x0,x0,x0 // return value
$code.=<<___ if ($flavour !~ /64/);
$code.=<<___ if ($flavour =~ /64/);
.size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
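# $e/$mc select the mnemonic suffixes, so the shared body below expands
# to aese/aesmc for encryption and aesd/aesimc for decryption.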
my ($inp,$out,$key)=map("x$_",(0..2));
my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
.globl ${prefix}_${dir}crypt
.type ${prefix}_${dir}crypt,%function
${prefix}_${dir}crypt:
ldr $rounds,[$key,#240]
vld1.32 {$rndkey0},[$key],#16
vld1.8 {$inout},[$inp]
sub $rounds,$rounds,#2
vld1.32 {$rndkey1},[$key],#16
aes$e $inout,$rndkey0
vld1.32 {$rndkey0},[$key],#16
subs $rounds,$rounds,#2
aes$e $inout,$rndkey1
vld1.32 {$rndkey1},[$key],#16
aes$e $inout,$rndkey0
vld1.32 {$rndkey0},[$key]
aes$e $inout,$rndkey1
veor $inout,$inout,$rndkey0
vst1.8 {$inout},[$out]
.size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
### q8-q15 preloaded key schedule
.globl ${prefix}_cbc_encrypt
.type ${prefix}_cbc_encrypt,%function
${prefix}_cbc_encrypt:
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
$code.=<<___ if ($flavour !~ /64/);
vstmdb sp!,{d8-d15} @ ABI specification says so
ldmia ip,{r4-r5} @ load remaining args
cmp $enc,#0 // en- or decrypting?
ldr $rounds,[$key,#240]
vld1.8 {$ivec},[$ivp]
vld1.8 {$dat},[$inp],$step
vld1.32 {q8-q9},[$key] // load key schedule...
sub $rounds,$rounds,#6
add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
sub $rounds,$rounds,#2
vld1.32 {q10-q11},[$key_],#32
vld1.32 {q12-q13},[$key_],#32
vld1.32 {q14-q15},[$key_],#32
vld1.32 {$rndlast},[$key_]
veor $rndzero_n_last,q8,$rndlast
vld1.32 {$in0-$in1},[$key_]
vst1.8 {$ivec},[$out],#16
vld1.8 {q8},[$inp],$step
veor q8,q8,$rndzero_n_last
vld1.32 {q9},[$key_] // re-pre-load rndkey[1]
veor $ivec,$dat,$rndlast
vst1.8 {$ivec},[$out],#16
vld1.32 {$in0-$in1},[$key_]
vst1.8 {$ivec},[$out],#16
vld1.8 {q8},[$inp],$step
veor q8,q8,$rndzero_n_last
veor $ivec,$dat,$rndlast
b.hs .Loop_cbc_enc128
vst1.8 {$ivec},[$out],#16
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
vld1.8 {$dat2},[$inp],#16
subs $len,$len,#32 // bias
vorr $in2,$dat2,$dat2
vorr $dat1,$dat2,$dat2
vld1.8 {$dat2},[$inp],#16
vorr $in1,$dat1,$dat1
vorr $in2,$dat2,$dat2
vld1.32 {q8},[$key_],#16
vld1.32 {q9},[$key_],#16
veor $tmp0,$ivec,$rndlast
veor $tmp1,$in0,$rndlast
mov.lo x6,$len // x6, $cnt, is zero at this point
veor $tmp2,$in1,$rndlast
add $inp,$inp,x6 // $inp is adjusted in such way that
// at exit from the loop $dat1-$dat2
// are loaded with last "words"
vld1.8 {$in0},[$inp],#16
vld1.8 {$in1},[$inp],#16
vld1.8 {$in2},[$inp],#16
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
veor $tmp0,$tmp0,$dat0
veor $tmp1,$tmp1,$dat1
veor $dat2,$dat2,$tmp2
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp0},[$out],#16
vst1.8 {$tmp1},[$out],#16
vst1.8 {$dat2},[$out],#16
vld1.32 {q8},[$key_],#16
vld1.32 {q9},[$key_],#16
veor $tmp1,$ivec,$rndlast
veor $tmp2,$in1,$rndlast
veor $tmp1,$tmp1,$dat1
veor $tmp2,$tmp2,$dat2
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
veor $tmp1,$tmp1,$dat2
vst1.8 {$tmp1},[$out],#16
vst1.8 {$ivec},[$ivp]
$code.=<<___ if ($flavour !~ /64/);
$code.=<<___ if ($flavour =~ /64/);
.size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
my ($rounds,$cnt,$key_)=("w5","w6","x7");
my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
my $step="x12"; # aliases with $tctr2
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
my ($dat,$tmp)=($dat0,$tmp0);
### q8-q15 preloaded key schedule
.globl ${prefix}_ctr32_encrypt_blocks
.type ${prefix}_ctr32_encrypt_blocks,%function
${prefix}_ctr32_encrypt_blocks:
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
$code.=<<___ if ($flavour !~ /64/);
stmdb sp!,{r4-r10,lr}
vstmdb sp!,{d8-d15} @ ABI specification says so
ldr r4, [ip] @ load remaining arg
ldr $rounds,[$key,#240]
ldr $ctr, [$ivp, #12]
vld1.32 {$dat0},[$ivp]
vld1.32 {q8-q9},[$key] // load key schedule...
sub $rounds,$rounds,#4
add $key_,$key,x5,lsl#4 // pointer to last 5 round keys
sub $rounds,$rounds,#2
vld1.32 {q12-q13},[$key_],#32
vld1.32 {q14-q15},[$key_],#32
vld1.32 {$rndlast},[$key_]
vorr $dat1,$dat0,$dat0
vorr $dat2,$dat0,$dat0
vorr $ivec,$dat0,$dat0
vmov.32 ${dat1}[3],$tctr1
sub $len,$len,#3 // bias
vmov.32 ${dat2}[3],$tctr2
vld1.32 {q8},[$key_],#16
vld1.32 {q9},[$key_],#16
vld1.8 {$in0},[$inp],#16
vorr $dat0,$ivec,$ivec
vld1.8 {$in1},[$inp],#16
vorr $dat1,$ivec,$ivec
vld1.8 {$in2},[$inp],#16
vorr $dat2,$ivec,$ivec
veor $in0,$in0,$rndlast
veor $in1,$in1,$rndlast
veor $in2,$in2,$rndlast
vmov.32 ${dat0}[3], $tctr0
vmov.32 ${dat1}[3], $tctr1
vmov.32 ${dat2}[3], $tctr2
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
vst1.8 {$in0},[$out],#16
vst1.8 {$in1},[$out],#16
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$in2},[$out],#16
vld1.32 {q8},[$key_],#16
vld1.32 {q9},[$key_],#16
vld1.8 {$in0},[$inp],$step
veor $in0,$in0,$rndlast
veor $in1,$in1,$rndlast
vst1.8 {$in0},[$out],#16
$code.=<<___ if ($flavour !~ /64/);
ldmia sp!,{r4-r10,pc}
$code.=<<___ if ($flavour =~ /64/);
.size ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
########################################
if ($flavour =~ /64/) { ######## 64-bit code
"aesd" => 0x4e285800, "aese" => 0x4e284800,
"aesimc"=> 0x4e287800, "aesmc" => 0x4e286800 );
my ($mnemonic,$arg)=@_;
$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o &&
sprintf ".inst\t0x%08x\t//%s %s",
$opcode{$mnemonic}|$1|($2<<5),
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/geo;
s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
s/@\s/\/\//o; # old->new style commentary
#s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel $2,$3,$2,$1/o or
s/vmov\.i8/movi/o or # fix up legacy mnemonics
s/vrev32\.8/rev32/o or
s/^(\s+)v/$1/o or # strip off v prefix
# fix up remaining legacy suffixes
m/\],#8/o and s/\.16b/\.8b/go;
s/\.[ui]?32//o and s/\.16b/\.4s/go;
s/\.[ui]?64//o and s/\.16b/\.2d/go;
s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
} else { ######## 32-bit code
"aesd" => 0xf3b00340, "aese" => 0xf3b00300,
"aesimc"=> 0xf3b003c0, "aesmc" => 0xf3b00380 );
my ($mnemonic,$arg)=@_;
if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
|(($2&7)<<1) |(($2&8)<<2);
# Since ARMv7 instructions are always encoded little-endian, the
# opcode is emitted as individual bytes. The correct solution would
# be to use the .inst directive, but older assemblers don't
# implement it:-(
952 sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
953 $word&0xff,($word>>8)&0xff,
954 ($word>>16)&0xff,($word>>24)&0xff,
962 $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
963 sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
964 "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
970 $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
971 sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
977 $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
978 sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/geo;
s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
s/\/\/\s?/@ /o; # new->old style commentary
# fix up remaining new-style suffixes
s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
s/cclr\s+([^,]+),\s*([a-z]+)/mov$2 $1,#0/o or
s/vtbl\.8\s+(.*)/unvtbl($1)/geo or
s/vdup\.32\s+(.*)/unvdup32($1)/geo or
s/vmov\.32\s+(.*)/unvmov32($1)/geo or
s/^(\s+)mov\./$1mov/o or
s/^(\s+)ret/$1bx\tlr/o;