2 # Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
17 # ECP_NISTZ256 module for SPARCv9.
21 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
22 # http://eprint.iacr.org/2013/816. In the process of adaptation
23 # original .c module was made 32-bit savvy in order to make this
24 # implementation possible.
26 # with/without -DECP_NISTZ256_ASM
27 # UltraSPARC III +12-18%
28 # SPARC T4 +99-550% (+66-150% on 32-bit Solaris)
30 # Ranges denote minimum and maximum improvement coefficients depending
31 # on benchmark. Lower coefficients are for ECDSA sign, server-side
32 # operation. Keep in mind that +200% means 3x improvement.
35 open STDOUT,">$output";
38 #include "sparc_arch.h"
40 #define LOCALS (STACK_BIAS+STACK_FRAME)
42 .register %g2,#scratch
43 .register %g3,#scratch
44 # define STACK64_FRAME STACK_FRAME
45 # define LOCALS64 LOCALS
47 # define STACK64_FRAME (2047+192)
48 # define LOCALS64 STACK64_FRAME
51 .section ".text",#alloc,#execinstr
53 ########################################################################
54 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
56 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
57 open TABLE,"<ecp_nistz256_table.c" or
58 open TABLE,"<${dir}../ecp_nistz256_table.c" or
59 die "failed to open ecp_nistz256_table.c:",$!;
64 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
68 # See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
69 # 64*16*37-1 is used because $#arr returns the last valid index of @arr,
70 # not the number of elements.
71 die "insane number of elements" if ($#arr != 64*16*37-1);
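# (Illustrative aside, not part of the conversion: for a hypothetical
# "my @a = (10,20,30)", scalar(@a) is 3 while $#a is 2, hence the -1 in
# the check above.)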
74 .globl ecp_nistz256_precomputed
76 ecp_nistz256_precomputed:
78 ########################################################################
79 # this conversion smashes P256_POINT_AFFINE by individual bytes with
80 # a 64-byte interval, i.e. it byte-transposes each row of 64 points so
81 # that ecp_nistz_gather_w7 can pick one byte per point at a fixed stride.
84 @tbl = splice(@arr,0,64*16);
85 for($i=0;$i<64;$i++) {
87 for($j=0;$j<64;$j++) {
88 push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
91 $code.=join(',',map { sprintf "0x%02x",$_} @line);
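# A minimal sketch of the mapping performed by the loop above, assuming
# 0-based byte index $i and point index $j within one row of 64 points
# (illustrative helper only, not used by the generator):
#
#	sub scattered_offset { my ($i,$j)=@_; return $i*64+$j; }
#
# i.e. byte $i of point $j lands at offset $i*64+$j, which is what lets
# ecp_nistz_gather_w7 fetch one byte per point at a fixed 64-byte stride.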
97 my ($rp,$ap,$bp)=map("%i$_",(0..2));
98 my @acc=map("%l$_",(0..7));
99 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
100 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
101 my ($rp_real,$ap_real)=("%g2","%g3");
104 .type ecp_nistz256_precomputed,#object
105 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
107 .LRR: ! 2^512 mod P precomputed for NIST P256 polynomial
108 .long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
109 .long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
111 .long 1,0,0,0,0,0,0,0
112 .asciz "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
114 ! void ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
115 .globl ecp_nistz256_to_mont
117 ecp_nistz256_to_mont:
118 save %sp,-STACK_FRAME,%sp
122 call __ecp_nistz256_mul_mont
126 .type ecp_nistz256_to_mont,#function
127 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
129 ! void ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
130 .globl ecp_nistz256_from_mont
132 ecp_nistz256_from_mont:
133 save %sp,-STACK_FRAME,%sp
137 call __ecp_nistz256_mul_mont
141 .type ecp_nistz256_from_mont,#function
142 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
144 ! void ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
145 ! const BN_ULONG %i2[8]);
146 .globl ecp_nistz256_mul_mont
148 ecp_nistz256_mul_mont:
149 save %sp,-STACK_FRAME,%sp
151 call __ecp_nistz256_mul_mont
155 .type ecp_nistz256_mul_mont,#function
156 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
158 ! void ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i2[8]);
159 .globl ecp_nistz256_sqr_mont
161 ecp_nistz256_sqr_mont:
162 save %sp,-STACK_FRAME,%sp
164 call __ecp_nistz256_mul_mont
168 .type ecp_nistz256_sqr_mont,#function
169 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
172 ########################################################################
173 # Special thing to keep in mind is that $t0-$t7 hold 64-bit values,
174 # while all others are meant to keep 32. "Meant to" means that additions
175 # to @acc[0-7] do "contaminate" upper bits, but they are cleared before
176 # they can affect outcome (follow 'and' with $mask). Also keep in mind
177 # that addition with carry is addition with 32-bit carry, even though
178 # CPU is 64-bit. [Addition with 64-bit carry was introduced in T3, see
179 # below for VIS3 code paths.]
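# A small worked illustration of the masking (values made up, carry-in
# ignored): if @acc[3] and $t3 were both 0xffffffff, "addccc" would leave
# 0x1fffffffe in the 64-bit register; the 32-bit carry still propagates
# through %icc, and the stray bit 32 is cleared by the following
# "and @acc[3],$mask,@acc[3]" before the value is used again.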
183 __ecp_nistz256_mul_mont:
184 ld [$bp+0],$bi ! b[0]
187 srl $mask,0,$mask ! 0xffffffff
195 mulx $a0,$bi,$t0 ! a[0-7]*b[0], 64-bit results
203 srlx $t0,32,@acc[1] ! extract high parts
210 srlx $t7,32,@acc[0] ! "@acc[8]"
213 for($i=1;$i<8;$i++) {
215 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
216 ld [$bp+4*$i],$bi ! b[$i]
217 ld [$ap+4],$t1 ! re-load a[1-7]
218 addccc @acc[2],$t2,@acc[2]
219 addccc @acc[3],$t3,@acc[3]
222 addccc @acc[4],$t4,@acc[4]
223 addccc @acc[5],$t5,@acc[5]
226 addccc @acc[6],$t6,@acc[6]
227 addccc @acc[7],$t7,@acc[7]
230 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
233 # Reduction iteration is normally performed by accumulating
234 # result of multiplication of modulus by "magic" digit [and
235 # omitting least significant word, which is guaranteed to
236 # be 0], but thanks to special form of modulus and "magic"
237 # digit being equal to least significant word, it can be
238 # performed with additions and subtractions alone. Indeed:
240 #	  ffff.0001.0000.0000.0000.ffff.ffff.ffff
241 #	* abcd
242 #	+ xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
244 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
245 # rewrite the above as:
247 #	  xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
248 # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
249 # - abcd.0000.0000.0000.0000.0000.0000.abcd
251 # or marking redundant operations:
253 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
254 # + abcd.0000.abcd.0000.0000.abcd.----.----.----
255 # - abcd.----.----.----.----.----.----.----
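# A quick standalone sanity check of this identity, kept as a comment
# (illustrative only, not executed by this generator):
#
#	use Math::BigInt;
#	my $one = Math::BigInt->bone;
#	my $p   = ($one<<256) - ($one<<224) + ($one<<192) + ($one<<96) - $one;
#	my $d   = Math::BigInt->new(0xabcd);	# any 32-bit "magic" digit
#	my $add = ($d<<256) + ($d<<192) + ($d<<96);
#	my $sub = ($d<<224) + $d;
#	die "identity broken" unless $p*$d == $add - $sub;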
258 ! multiplication-less reduction
259 addcc @acc[3],$t0,@acc[3] ! r[3]+=r[0]
260 addccc @acc[4],%g0,@acc[4] ! r[4]+=0
261 and @acc[1],$mask,@acc[1]
262 and @acc[2],$mask,@acc[2]
263 addccc @acc[5],%g0,@acc[5] ! r[5]+=0
264 addccc @acc[6],$t0,@acc[6] ! r[6]+=r[0]
265 and @acc[3],$mask,@acc[3]
266 and @acc[4],$mask,@acc[4]
267 addccc @acc[7],%g0,@acc[7] ! r[7]+=0
268 addccc @acc[0],$t0,@acc[0] ! r[8]+=r[0] "@acc[8]"
269 and @acc[5],$mask,@acc[5]
270 and @acc[6],$mask,@acc[6]
271 addc $carry,%g0,$carry ! top-most carry
272 subcc @acc[7],$t0,@acc[7] ! r[7]-=r[0]
273 subccc @acc[0],%g0,@acc[0] ! r[8]-=0 "@acc[8]"
274 subc $carry,%g0,$carry ! top-most carry
275 and @acc[7],$mask,@acc[7]
276 and @acc[0],$mask,@acc[0] ! "@acc[8]"
278 push(@acc,shift(@acc)); # rotate registers to "omit" acc[0]
280 mulx $a0,$bi,$t0 ! a[0-7]*b[$i], 64-bit results
288 add @acc[0],$t0,$t0 ! accumulate low parts, can't overflow
290 srlx $t0,32,@acc[1] ! extract high parts
303 srlx $t7,32,@acc[0] ! "@acc[8]"
307 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
308 addccc @acc[2],$t2,@acc[2]
309 addccc @acc[3],$t3,@acc[3]
310 addccc @acc[4],$t4,@acc[4]
311 addccc @acc[5],$t5,@acc[5]
312 addccc @acc[6],$t6,@acc[6]
313 addccc @acc[7],$t7,@acc[7]
314 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
317 addcc @acc[3],$t0,@acc[3] ! multiplication-less reduction
318 addccc @acc[4],%g0,@acc[4]
319 addccc @acc[5],%g0,@acc[5]
320 addccc @acc[6],$t0,@acc[6]
321 addccc @acc[7],%g0,@acc[7]
322 addccc @acc[0],$t0,@acc[0] ! "@acc[8]"
323 addc $carry,%g0,$carry
324 subcc @acc[7],$t0,@acc[7]
325 subccc @acc[0],%g0,@acc[0] ! "@acc[8]"
326 subc $carry,%g0,$carry ! top-most carry
328 push(@acc,shift(@acc)); # rotate registers to omit acc[0]
330 ! Final step is "if result > mod, subtract mod", but we do it
331 ! "other way around", namely subtract modulus from result
332 ! and if it borrowed, add modulus back.
334 subcc @acc[0],-1,@acc[0] ! subtract modulus
335 subccc @acc[1],-1,@acc[1]
336 subccc @acc[2],-1,@acc[2]
337 subccc @acc[3],0,@acc[3]
338 subccc @acc[4],0,@acc[4]
339 subccc @acc[5],0,@acc[5]
340 subccc @acc[6],1,@acc[6]
341 subccc @acc[7],-1,@acc[7]
342 subc $carry,0,$carry ! broadcast borrow bit
344 ! Note that because mod has special form, i.e. consists of
345 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
346 ! using value of broadcasted borrow and the borrow bit itself.
347 ! To minimize dependency chain we first broadcast and then
348 ! extract the bit by negating (follow $bi).
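	! For example: if the subtraction above borrowed, $carry is the
	! broadcast value 0xffffffff and $bi (its negation) is 1, so the
	! words added back are (ffffffff,ffffffff,ffffffff,0,0,0,1,ffffffff),
	! i.e. exactly the modulus; if it did not borrow, both are zero and
	! the addition is a no-op.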
350 addcc @acc[0],$carry,@acc[0] ! add modulus or zero
351 addccc @acc[1],$carry,@acc[1]
354 addccc @acc[2],$carry,@acc[2]
356 addccc @acc[3],0,@acc[3]
358 addccc @acc[4],0,@acc[4]
360 addccc @acc[5],0,@acc[5]
362 addccc @acc[6],$bi,@acc[6]
364 addc @acc[7],$carry,@acc[7]
368 .type __ecp_nistz256_mul_mont,#function
369 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
371 ! void ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
372 ! const BN_ULONG %i2[8]);
373 .globl ecp_nistz256_add
376 save %sp,-STACK_FRAME,%sp
384 call __ecp_nistz256_add
388 .type ecp_nistz256_add,#function
389 .size ecp_nistz256_add,.-ecp_nistz256_add
393 ld [$bp+0],$t0 ! b[0]
397 addcc @acc[0],$t0,@acc[0]
400 addccc @acc[1],$t1,@acc[1]
403 addccc @acc[2],$t2,@acc[2]
404 addccc @acc[3],$t3,@acc[3]
405 addccc @acc[4],$t4,@acc[4]
406 addccc @acc[5],$t5,@acc[5]
407 addccc @acc[6],$t6,@acc[6]
408 addccc @acc[7],$t7,@acc[7]
413 ! if a+b >= modulus, subtract modulus.
415 ! But since comparison implies subtraction, we subtract
416 ! modulus and then add it back if the subtraction borrowed.
418 subcc @acc[0],-1,@acc[0]
419 subccc @acc[1],-1,@acc[1]
420 subccc @acc[2],-1,@acc[2]
421 subccc @acc[3], 0,@acc[3]
422 subccc @acc[4], 0,@acc[4]
423 subccc @acc[5], 0,@acc[5]
424 subccc @acc[6], 1,@acc[6]
425 subccc @acc[7],-1,@acc[7]
428 ! Note that because mod has special form, i.e. consists of
429 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
430 ! using value of borrow and its negative.
432 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
433 addccc @acc[1],$carry,@acc[1]
436 addccc @acc[2],$carry,@acc[2]
438 addccc @acc[3],0,@acc[3]
440 addccc @acc[4],0,@acc[4]
442 addccc @acc[5],0,@acc[5]
444 addccc @acc[6],$bi,@acc[6]
446 addc @acc[7],$carry,@acc[7]
450 .type __ecp_nistz256_add,#function
451 .size __ecp_nistz256_add,.-__ecp_nistz256_add
453 ! void ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
454 .globl ecp_nistz256_mul_by_2
456 ecp_nistz256_mul_by_2:
457 save %sp,-STACK_FRAME,%sp
465 call __ecp_nistz256_mul_by_2
469 .type ecp_nistz256_mul_by_2,#function
470 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
473 __ecp_nistz256_mul_by_2:
474 addcc @acc[0],@acc[0],@acc[0] ! a+a=2*a
475 addccc @acc[1],@acc[1],@acc[1]
476 addccc @acc[2],@acc[2],@acc[2]
477 addccc @acc[3],@acc[3],@acc[3]
478 addccc @acc[4],@acc[4],@acc[4]
479 addccc @acc[5],@acc[5],@acc[5]
480 addccc @acc[6],@acc[6],@acc[6]
481 addccc @acc[7],@acc[7],@acc[7]
484 .type __ecp_nistz256_mul_by_2,#function
485 .size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
487 ! void ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
488 .globl ecp_nistz256_mul_by_3
490 ecp_nistz256_mul_by_3:
491 save %sp,-STACK_FRAME,%sp
499 call __ecp_nistz256_mul_by_3
503 .type ecp_nistz256_mul_by_3,#function
504 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
507 __ecp_nistz256_mul_by_3:
508 addcc @acc[0],@acc[0],$t0 ! a+a=2*a
509 addccc @acc[1],@acc[1],$t1
510 addccc @acc[2],@acc[2],$t2
511 addccc @acc[3],@acc[3],$t3
512 addccc @acc[4],@acc[4],$t4
513 addccc @acc[5],@acc[5],$t5
514 addccc @acc[6],@acc[6],$t6
515 addccc @acc[7],@acc[7],$t7
518 subcc $t0,-1,$t0 ! .Lreduce_by_sub but without stores
528 addcc $t0,$carry,$t0 ! add synthesized modulus
529 addccc $t1,$carry,$t1
531 addccc $t2,$carry,$t2
538 addcc $t0,@acc[0],@acc[0] ! 2*a+a=3*a
539 addccc $t1,@acc[1],@acc[1]
540 addccc $t2,@acc[2],@acc[2]
541 addccc $t3,@acc[3],@acc[3]
542 addccc $t4,@acc[4],@acc[4]
543 addccc $t5,@acc[5],@acc[5]
544 addccc $t6,@acc[6],@acc[6]
545 addccc $t7,@acc[7],@acc[7]
548 .type __ecp_nistz256_mul_by_3,#function
549 .size __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
551 ! void ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
552 ! const BN_ULONG %i2[8]);
553 .globl ecp_nistz256_sub
556 save %sp,-STACK_FRAME,%sp
564 call __ecp_nistz256_sub_from
568 .type ecp_nistz256_sub,#function
569 .size ecp_nistz256_sub,.-ecp_nistz256_sub
571 ! void ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
572 .globl ecp_nistz256_neg
575 save %sp,-STACK_FRAME,%sp
584 call __ecp_nistz256_sub_from
588 .type ecp_nistz256_neg,#function
589 .size ecp_nistz256_neg,.-ecp_nistz256_neg
592 __ecp_nistz256_sub_from:
593 ld [$bp+0],$t0 ! b[0]
597 subcc @acc[0],$t0,@acc[0]
600 subccc @acc[1],$t1,@acc[1]
601 subccc @acc[2],$t2,@acc[2]
604 subccc @acc[3],$t3,@acc[3]
605 subccc @acc[4],$t4,@acc[4]
606 subccc @acc[5],$t5,@acc[5]
607 subccc @acc[6],$t6,@acc[6]
608 subccc @acc[7],$t7,@acc[7]
609 subc %g0,%g0,$carry ! broadcast borrow bit
613 ! if a-b borrows, add modulus.
615 ! Note that because mod has special form, i.e. consists of
616 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
617 ! using value of broadcasted borrow and the borrow bit itself.
618 ! To minimize dependency chain we first broadcast and then
619 ! extract the bit by negating (follow $bi).
621 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
622 addccc @acc[1],$carry,@acc[1]
625 addccc @acc[2],$carry,@acc[2]
627 addccc @acc[3],0,@acc[3]
629 addccc @acc[4],0,@acc[4]
631 addccc @acc[5],0,@acc[5]
633 addccc @acc[6],$bi,@acc[6]
635 addc @acc[7],$carry,@acc[7]
639 .type __ecp_nistz256_sub_from,#function
640 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
643 __ecp_nistz256_sub_morf:
644 ld [$bp+0],$t0 ! b[0]
648 subcc $t0,@acc[0],@acc[0]
651 subccc $t1,@acc[1],@acc[1]
652 subccc $t2,@acc[2],@acc[2]
655 subccc $t3,@acc[3],@acc[3]
656 subccc $t4,@acc[4],@acc[4]
657 subccc $t5,@acc[5],@acc[5]
658 subccc $t6,@acc[6],@acc[6]
659 subccc $t7,@acc[7],@acc[7]
661 subc %g0,%g0,$carry ! broadcast borrow bit
662 .type __ecp_nistz256_sub_morf,#function
663 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
665 ! void ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
666 .globl ecp_nistz256_div_by_2
668 ecp_nistz256_div_by_2:
669 save %sp,-STACK_FRAME,%sp
677 call __ecp_nistz256_div_by_2
681 .type ecp_nistz256_div_by_2,#function
682 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
685 __ecp_nistz256_div_by_2:
686 ! ret = (a is odd ? a+mod : a) >> 1
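	! (the modulus is odd, so a+mod is even whenever a is odd, and the
	! shift below therefore computes a*2^-1 mod P; e.g. for a=1 the
	! result is (P+1)/2)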
690 addcc @acc[0],$carry,@acc[0]
691 addccc @acc[1],$carry,@acc[1]
692 addccc @acc[2],$carry,@acc[2]
693 addccc @acc[3],0,@acc[3]
694 addccc @acc[4],0,@acc[4]
695 addccc @acc[5],0,@acc[5]
696 addccc @acc[6],$bi,@acc[6]
697 addccc @acc[7],$carry,@acc[7]
702 srl @acc[0],1,@acc[0]
704 srl @acc[1],1,@acc[1]
705 or @acc[0],$t0,@acc[0]
707 srl @acc[2],1,@acc[2]
708 or @acc[1],$t1,@acc[1]
711 srl @acc[3],1,@acc[3]
712 or @acc[2],$t2,@acc[2]
715 srl @acc[4],1,@acc[4]
716 or @acc[3],$t3,@acc[3]
719 srl @acc[5],1,@acc[5]
720 or @acc[4],$t4,@acc[4]
723 srl @acc[6],1,@acc[6]
724 or @acc[5],$t5,@acc[5]
727 srl @acc[7],1,@acc[7]
728 or @acc[6],$t6,@acc[6]
731 or @acc[7],$t7,@acc[7]
735 .type __ecp_nistz256_div_by_2,#function
736 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
739 ########################################################################
740 # following subroutines are "literal" implementations of those found
741 # in ecp_nistz256.c.
743 ########################################################################
744 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
747 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
748 # above map() describes stack layout with 4 temporary
749 # 256-bit vectors on top.
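# For reference, the resulting local-variable layout (offsets relative
# to %sp+LOCALS, 32 bytes per 256-bit vector):
#
#	LOCALS+  0	S
#	LOCALS+ 32	M
#	LOCALS+ 64	Zsqr
#	LOCALS+ 96	tmp0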
756 .globl ecp_nistz256_point_double
758 ecp_nistz256_point_double:
759 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
760 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
761 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
762 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
763 be ecp_nistz256_point_double_vis3
766 save %sp,-STACK_FRAME-32*4,%sp
771 .Lpoint_double_shortcut:
773 ld [$ap+32+4],@acc[1]
774 ld [$ap+32+8],@acc[2]
775 ld [$ap+32+12],@acc[3]
776 ld [$ap+32+16],@acc[4]
777 ld [$ap+32+20],@acc[5]
778 ld [$ap+32+24],@acc[6]
779 ld [$ap+32+28],@acc[7]
780 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
781 add %sp,LOCALS+$S,$rp
785 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
786 add %sp,LOCALS+$Zsqr,$rp
789 call __ecp_nistz256_add ! p256_add(M, Zsqr, in_x);
790 add %sp,LOCALS+$M,$rp
792 add %sp,LOCALS+$S,$bp
793 add %sp,LOCALS+$S,$ap
794 call __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
795 add %sp,LOCALS+$S,$rp
797 ld [$ap_real],@acc[0]
798 add %sp,LOCALS+$Zsqr,$bp
799 ld [$ap_real+4],@acc[1]
800 ld [$ap_real+8],@acc[2]
801 ld [$ap_real+12],@acc[3]
802 ld [$ap_real+16],@acc[4]
803 ld [$ap_real+20],@acc[5]
804 ld [$ap_real+24],@acc[6]
805 ld [$ap_real+28],@acc[7]
806 call __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
807 add %sp,LOCALS+$Zsqr,$rp
811 call __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
812 add %sp,LOCALS+$tmp0,$rp
814 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
817 add %sp,LOCALS+$Zsqr,$bp
818 add %sp,LOCALS+$M,$ap
819 call __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
820 add %sp,LOCALS+$M,$rp
822 call __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
823 add %sp,LOCALS+$M,$rp
825 add %sp,LOCALS+$S,$bp
826 add %sp,LOCALS+$S,$ap
827 call __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
828 add %sp,LOCALS+$tmp0,$rp
830 call __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
834 add %sp,LOCALS+$S,$ap
835 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
836 add %sp,LOCALS+$S,$rp
838 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
839 add %sp,LOCALS+$tmp0,$rp
841 add %sp,LOCALS+$M,$bp
842 add %sp,LOCALS+$M,$ap
843 call __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
846 add %sp,LOCALS+$tmp0,$bp
847 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
850 add %sp,LOCALS+$S,$bp
851 call __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
852 add %sp,LOCALS+$S,$rp
854 add %sp,LOCALS+$M,$bp
855 add %sp,LOCALS+$S,$ap
856 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
857 add %sp,LOCALS+$S,$rp
860 call __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
865 .type ecp_nistz256_point_double,#function
866 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
870 ########################################################################
871 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
872 # const P256_POINT *in2);
874 my ($res_x,$res_y,$res_z,
875 $H,$Hsqr,$R,$Rsqr,$Hcub,
876 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
877 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
879 # above map() describes stack layout with 12 temporary
880 # 256-bit vectors on top. Then we reserve some space for
881 # !in1infty, !in2infty, result of check for zero and return pointer.
883 my $bp_real=$rp_real;
886 .globl ecp_nistz256_point_add
888 ecp_nistz256_point_add:
889 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
890 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
891 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
892 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
893 be ecp_nistz256_point_add_vis3
896 save %sp,-STACK_FRAME-32*12-32,%sp
898 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
902 ld [$bp+64],$t0 ! in2_z
916 or $t4,$t0,$t0 ! !in2infty
918 st $t0,[%fp+STACK_BIAS-12]
920 ld [$ap+64],$t0 ! in1_z
934 or $t4,$t0,$t0 ! !in1infty
936 st $t0,[%fp+STACK_BIAS-16]
940 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
941 add %sp,LOCALS+$Z2sqr,$rp
945 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
946 add %sp,LOCALS+$Z1sqr,$rp
949 add %sp,LOCALS+$Z2sqr,$ap
950 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
951 add %sp,LOCALS+$S1,$rp
954 add %sp,LOCALS+$Z1sqr,$ap
955 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
956 add %sp,LOCALS+$S2,$rp
959 add %sp,LOCALS+$S1,$ap
960 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
961 add %sp,LOCALS+$S1,$rp
964 add %sp,LOCALS+$S2,$ap
965 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
966 add %sp,LOCALS+$S2,$rp
968 add %sp,LOCALS+$S1,$bp
969 call __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
970 add %sp,LOCALS+$R,$rp
972 or @acc[1],@acc[0],@acc[0] ! see if result is zero
973 or @acc[3],@acc[2],@acc[2]
974 or @acc[5],@acc[4],@acc[4]
975 or @acc[7],@acc[6],@acc[6]
976 or @acc[2],@acc[0],@acc[0]
977 or @acc[6],@acc[4],@acc[4]
978 or @acc[4],@acc[0],@acc[0]
979 st @acc[0],[%fp+STACK_BIAS-20]
982 add %sp,LOCALS+$Z2sqr,$ap
983 call __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
984 add %sp,LOCALS+$U1,$rp
987 add %sp,LOCALS+$Z1sqr,$ap
988 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
989 add %sp,LOCALS+$U2,$rp
991 add %sp,LOCALS+$U1,$bp
992 call __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
993 add %sp,LOCALS+$H,$rp
995 or @acc[1],@acc[0],@acc[0] ! see if result is zero
996 or @acc[3],@acc[2],@acc[2]
997 or @acc[5],@acc[4],@acc[4]
998 or @acc[7],@acc[6],@acc[6]
999 or @acc[2],@acc[0],@acc[0]
1000 or @acc[6],@acc[4],@acc[4]
1001 orcc @acc[4],@acc[0],@acc[0]
1003 bne,pt %icc,.Ladd_proceed ! is_equal(U1,U2)?
1006 ld [%fp+STACK_BIAS-12],$t0
1007 ld [%fp+STACK_BIAS-16],$t1
1008 ld [%fp+STACK_BIAS-20],$t2
1010 be,pt %icc,.Ladd_proceed ! (in1infty || in2infty)?
1013 be,pt %icc,.Ladd_double ! is_equal(S1,S2)?
1016 ldx [%fp+STACK_BIAS-8],$rp
1046 ldx [%fp+STACK_BIAS-8],$rp_real
1048 b .Lpoint_double_shortcut
1049 add %sp,32*(12-4)+32,%sp ! difference in frame sizes
1053 add %sp,LOCALS+$R,$bp
1054 add %sp,LOCALS+$R,$ap
1055 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1056 add %sp,LOCALS+$Rsqr,$rp
1059 add %sp,LOCALS+$H,$ap
1060 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1061 add %sp,LOCALS+$res_z,$rp
1063 add %sp,LOCALS+$H,$bp
1064 add %sp,LOCALS+$H,$ap
1065 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1066 add %sp,LOCALS+$Hsqr,$rp
1069 add %sp,LOCALS+$res_z,$ap
1070 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1071 add %sp,LOCALS+$res_z,$rp
1073 add %sp,LOCALS+$H,$bp
1074 add %sp,LOCALS+$Hsqr,$ap
1075 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1076 add %sp,LOCALS+$Hcub,$rp
1078 add %sp,LOCALS+$U1,$bp
1079 add %sp,LOCALS+$Hsqr,$ap
1080 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1081 add %sp,LOCALS+$U2,$rp
1083 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1084 add %sp,LOCALS+$Hsqr,$rp
1086 add %sp,LOCALS+$Rsqr,$bp
1087 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1088 add %sp,LOCALS+$res_x,$rp
1090 add %sp,LOCALS+$Hcub,$bp
1091 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1092 add %sp,LOCALS+$res_x,$rp
1094 add %sp,LOCALS+$U2,$bp
1095 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1096 add %sp,LOCALS+$res_y,$rp
1098 add %sp,LOCALS+$Hcub,$bp
1099 add %sp,LOCALS+$S1,$ap
1100 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1101 add %sp,LOCALS+$S2,$rp
1103 add %sp,LOCALS+$R,$bp
1104 add %sp,LOCALS+$res_y,$ap
1105 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1106 add %sp,LOCALS+$res_y,$rp
1108 add %sp,LOCALS+$S2,$bp
1109 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1110 add %sp,LOCALS+$res_y,$rp
1112 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1113 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1114 ldx [%fp+STACK_BIAS-8],$rp
1116 for($i=0;$i<96;$i+=8) { # conditional moves
1118 ld [%sp+LOCALS+$i],@acc[0] ! res
1119 ld [%sp+LOCALS+$i+4],@acc[1]
1120 ld [$bp_real+$i],@acc[2] ! in2
1121 ld [$bp_real+$i+4],@acc[3]
1122 ld [$ap_real+$i],@acc[4] ! in1
1123 ld [$ap_real+$i+4],@acc[5]
1124 movrz $t1,@acc[2],@acc[0]
1125 movrz $t1,@acc[3],@acc[1]
1126 movrz $t2,@acc[4],@acc[0]
1127 movrz $t2,@acc[5],@acc[1]
1129 st @acc[1],[$rp+$i+4]
1136 .type ecp_nistz256_point_add,#function
1137 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1141 ########################################################################
1142 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1143 # const P256_POINT_AFFINE *in2);
1145 my ($res_x,$res_y,$res_z,
1146 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1148 # above map() describes stack layout with 10 temporary
1149 # 256-bit vectors on top. Then we reserve some space for
1150 # !in1infty, !in2infty, result of check for zero and return pointer.
1152 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
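# @ONE_mont is the value 1 in Montgomery representation, i.e. 2^256 mod P,
# written as eight little-endian 32-bit words with -1 standing for
# 0xffffffff and -2 for 0xfffffffe.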
1153 my $bp_real=$rp_real;
1156 .globl ecp_nistz256_point_add_affine
1158 ecp_nistz256_point_add_affine:
1159 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1160 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
1161 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1162 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1163 be ecp_nistz256_point_add_affine_vis3
1166 save %sp,-STACK_FRAME-32*10-32,%sp
1168 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
1172 ld [$ap+64],$t0 ! in1_z
1186 or $t4,$t0,$t0 ! !in1infty
1188 st $t0,[%fp+STACK_BIAS-16]
1190 ld [$bp],@acc[0] ! in2_x
1198 ld [$bp+32],$t0 ! in2_y
1206 or @acc[1],@acc[0],@acc[0]
1207 or @acc[3],@acc[2],@acc[2]
1208 or @acc[5],@acc[4],@acc[4]
1209 or @acc[7],@acc[6],@acc[6]
1210 or @acc[2],@acc[0],@acc[0]
1211 or @acc[6],@acc[4],@acc[4]
1212 or @acc[4],@acc[0],@acc[0]
1220 or @acc[0],$t0,$t0 ! !in2infty
1222 st $t0,[%fp+STACK_BIAS-12]
1226 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1227 add %sp,LOCALS+$Z1sqr,$rp
1230 add %sp,LOCALS+$Z1sqr,$ap
1231 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1232 add %sp,LOCALS+$U2,$rp
1235 call __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1236 add %sp,LOCALS+$H,$rp
1239 add %sp,LOCALS+$Z1sqr,$ap
1240 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1241 add %sp,LOCALS+$S2,$rp
1244 add %sp,LOCALS+$H,$ap
1245 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1246 add %sp,LOCALS+$res_z,$rp
1249 add %sp,LOCALS+$S2,$ap
1250 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1251 add %sp,LOCALS+$S2,$rp
1254 call __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1255 add %sp,LOCALS+$R,$rp
1257 add %sp,LOCALS+$H,$bp
1258 add %sp,LOCALS+$H,$ap
1259 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1260 add %sp,LOCALS+$Hsqr,$rp
1262 add %sp,LOCALS+$R,$bp
1263 add %sp,LOCALS+$R,$ap
1264 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1265 add %sp,LOCALS+$Rsqr,$rp
1267 add %sp,LOCALS+$H,$bp
1268 add %sp,LOCALS+$Hsqr,$ap
1269 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1270 add %sp,LOCALS+$Hcub,$rp
1273 add %sp,LOCALS+$Hsqr,$ap
1274 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1275 add %sp,LOCALS+$U2,$rp
1277 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1278 add %sp,LOCALS+$Hsqr,$rp
1280 add %sp,LOCALS+$Rsqr,$bp
1281 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1282 add %sp,LOCALS+$res_x,$rp
1284 add %sp,LOCALS+$Hcub,$bp
1285 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1286 add %sp,LOCALS+$res_x,$rp
1288 add %sp,LOCALS+$U2,$bp
1289 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1290 add %sp,LOCALS+$res_y,$rp
1293 add %sp,LOCALS+$Hcub,$ap
1294 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1295 add %sp,LOCALS+$S2,$rp
1297 add %sp,LOCALS+$R,$bp
1298 add %sp,LOCALS+$res_y,$ap
1299 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1300 add %sp,LOCALS+$res_y,$rp
1302 add %sp,LOCALS+$S2,$bp
1303 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1304 add %sp,LOCALS+$res_y,$rp
1306 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1307 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1308 ldx [%fp+STACK_BIAS-8],$rp
1310 for($i=0;$i<64;$i+=8) { # conditional moves
1312 ld [%sp+LOCALS+$i],@acc[0] ! res
1313 ld [%sp+LOCALS+$i+4],@acc[1]
1314 ld [$bp_real+$i],@acc[2] ! in2
1315 ld [$bp_real+$i+4],@acc[3]
1316 ld [$ap_real+$i],@acc[4] ! in1
1317 ld [$ap_real+$i+4],@acc[5]
1318 movrz $t1,@acc[2],@acc[0]
1319 movrz $t1,@acc[3],@acc[1]
1320 movrz $t2,@acc[4],@acc[0]
1321 movrz $t2,@acc[5],@acc[1]
1323 st @acc[1],[$rp+$i+4]
1329 ld [%sp+LOCALS+$i],@acc[0] ! res
1330 ld [%sp+LOCALS+$i+4],@acc[1]
1331 ld [$ap_real+$i],@acc[4] ! in1
1332 ld [$ap_real+$i+4],@acc[5]
1333 movrz $t1,@ONE_mont[$j],@acc[0]
1334 movrz $t1,@ONE_mont[$j+1],@acc[1]
1335 movrz $t2,@acc[4],@acc[0]
1336 movrz $t2,@acc[5],@acc[1]
1338 st @acc[1],[$rp+$i+4]
1344 .type ecp_nistz256_point_add_affine,#function
1345 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1349 my ($out,$inp,$index)=map("%i$_",(0..2));
1353 ! void ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1355 .globl ecp_nistz256_scatter_w5
1357 ecp_nistz256_scatter_w5:
1358 save %sp,-STACK_FRAME,%sp
1361 add $out,$index,$out
1372 st %l0,[$out+64*0-4]
1373 st %l1,[$out+64*1-4]
1374 st %l2,[$out+64*2-4]
1375 st %l3,[$out+64*3-4]
1376 st %l4,[$out+64*4-4]
1377 st %l5,[$out+64*5-4]
1378 st %l6,[$out+64*6-4]
1379 st %l7,[$out+64*7-4]
1391 st %l0,[$out+64*0-4]
1392 st %l1,[$out+64*1-4]
1393 st %l2,[$out+64*2-4]
1394 st %l3,[$out+64*3-4]
1395 st %l4,[$out+64*4-4]
1396 st %l5,[$out+64*5-4]
1397 st %l6,[$out+64*6-4]
1398 st %l7,[$out+64*7-4]
1409 st %l0,[$out+64*0-4]
1410 st %l1,[$out+64*1-4]
1411 st %l2,[$out+64*2-4]
1412 st %l3,[$out+64*3-4]
1413 st %l4,[$out+64*4-4]
1414 st %l5,[$out+64*5-4]
1415 st %l6,[$out+64*6-4]
1416 st %l7,[$out+64*7-4]
1420 .type ecp_nistz256_scatter_w5,#function
1421 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1423 ! void ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1425 .globl ecp_nistz256_gather_w5
1427 ecp_nistz256_gather_w5:
1428 save %sp,-STACK_FRAME,%sp
1433 add $index,$mask,$index
1435 add $inp,$index,$inp
1518 .type ecp_nistz256_gather_w5,#function
1519 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1521 ! void ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1523 .globl ecp_nistz256_scatter_w7
1525 ecp_nistz256_scatter_w7:
1526 save %sp,-STACK_FRAME,%sp
1528 add $out,$index,$out
1533 subcc $index,1,$index
1534 stb %l0,[$out+64*0-1]
1536 stb %l1,[$out+64*1-1]
1538 stb %l2,[$out+64*2-1]
1540 stb %l3,[$out+64*3-1]
1541 bne .Loop_scatter_w7
1546 .type ecp_nistz256_scatter_w7,#function
1547 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1549 ! void ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1551 .globl ecp_nistz256_gather_w7
1553 ecp_nistz256_gather_w7:
1554 save %sp,-STACK_FRAME,%sp
1559 add $index,$mask,$index
1560 add $inp,$index,$inp
1564 ldub [$inp+64*0],%l0
1565 prefetch [$inp+3840+64*0],1
1566 subcc $index,1,$index
1567 ldub [$inp+64*1],%l1
1568 prefetch [$inp+3840+64*1],1
1569 ldub [$inp+64*2],%l2
1570 prefetch [$inp+3840+64*2],1
1571 ldub [$inp+64*3],%l3
1572 prefetch [$inp+3840+64*3],1
1587 .type ecp_nistz256_gather_w7,#function
1588 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1592 ########################################################################
1593 # Following subroutines are VIS3 counterparts of those above that
1594 # implement ones found in ecp_nistz256.c. Key difference is that they
1595 # use 128-bit multiplication and addition with 64-bit carry, and in order
1596 # to do that they perform conversion from uint32_t[8] to uint64_t[4] upon
1597 # entry and vice versa on return.
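# Conceptually, on entry each 64-bit limb is assembled from two adjacent
# 32-bit words of the little-endian input, limb[i] = a[2*i] + (a[2*i+1] << 32),
# and the result is split back into 32-bit halves the same way before it
# is stored.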
1599 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1600 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1601 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1602 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1603 my ($rp_real,$ap_real)=("%g2","%g3");
1604 my ($acc6,$acc7)=($bp,$bi); # used in squaring
1608 __ecp_nistz256_mul_by_2_vis3:
1609 addcc $acc0,$acc0,$acc0
1610 addxccc $acc1,$acc1,$acc1
1611 addxccc $acc2,$acc2,$acc2
1612 addxccc $acc3,$acc3,$acc3
1613 b .Lreduce_by_sub_vis3
1614 addxc %g0,%g0,$acc4 ! did it carry?
1615 .type __ecp_nistz256_mul_by_2_vis3,#function
1616 .size __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1619 __ecp_nistz256_add_vis3:
1625 __ecp_nistz256_add_noload_vis3:
1627 addcc $t0,$acc0,$acc0
1628 addxccc $t1,$acc1,$acc1
1629 addxccc $t2,$acc2,$acc2
1630 addxccc $t3,$acc3,$acc3
1631 addxc %g0,%g0,$acc4 ! did it carry?
1633 .Lreduce_by_sub_vis3:
1635 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
1636 addxccc $acc1,$poly1,$t1
1637 addxccc $acc2,$minus1,$t2
1638 addxccc $acc3,$poly3,$t3
1639 addxc $acc4,$minus1,$acc4
1641 movrz $acc4,$t0,$acc0 ! ret = borrow ? ret : ret-modulus
1642 movrz $acc4,$t1,$acc1
1644 movrz $acc4,$t2,$acc2
1646 movrz $acc4,$t3,$acc3
1650 .type __ecp_nistz256_add_vis3,#function
1651 .size __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
1653 ! Trouble with subtraction is that there is no subtraction with 64-bit
1654 ! borrow, only with 32-bit one. For this reason we "decompose" 64-bit
1655 ! $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
1656 ! recall that SPARC is big-endian, which is why you'll observe that
1657 ! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
1658 ! "collect" the result back into 64-bit $acc0-$acc3.
1660 __ecp_nistz256_sub_from_vis3:
1669 subcc $acc0,$t0,$acc0
1671 subccc $acc4,$t1,$acc4
1673 subccc $acc1,$t2,$acc1
1675 and $acc0,$poly1,$acc0
1676 subccc $acc5,$t3,$acc5
1679 and $acc1,$poly1,$acc1
1681 or $acc0,$acc4,$acc0
1683 or $acc1,$acc5,$acc1
1685 subccc $acc2,$t0,$acc2
1686 subccc $acc4,$t1,$acc4
1687 subccc $acc3,$t2,$acc3
1688 and $acc2,$poly1,$acc2
1689 subccc $acc5,$t3,$acc5
1691 and $acc3,$poly1,$acc3
1693 or $acc2,$acc4,$acc2
1694 subc %g0,%g0,$acc4 ! did it borrow?
1695 b .Lreduce_by_add_vis3
1696 or $acc3,$acc5,$acc3
1697 .type __ecp_nistz256_sub_from_vis3,#function
1698 .size __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1701 __ecp_nistz256_sub_morf_vis3:
1710 subcc $t0,$acc0,$acc0
1712 subccc $t1,$acc4,$acc4
1714 subccc $t2,$acc1,$acc1
1716 and $acc0,$poly1,$acc0
1717 subccc $t3,$acc5,$acc5
1720 and $acc1,$poly1,$acc1
1722 or $acc0,$acc4,$acc0
1724 or $acc1,$acc5,$acc1
1726 subccc $t0,$acc2,$acc2
1727 subccc $t1,$acc4,$acc4
1728 subccc $t2,$acc3,$acc3
1729 and $acc2,$poly1,$acc2
1730 subccc $t3,$acc5,$acc5
1732 and $acc3,$poly1,$acc3
1734 or $acc2,$acc4,$acc2
1735 subc %g0,%g0,$acc4 ! did it borrow?
1736 or $acc3,$acc5,$acc3
1738 .Lreduce_by_add_vis3:
1740 addcc $acc0,-1,$t0 ! add modulus
1742 addxccc $acc1,$poly1,$t1
1743 not $poly1,$poly1 ! restore $poly1
1744 addxccc $acc2,%g0,$t2
1747 movrnz $acc4,$t0,$acc0 ! if a-b borrowed, ret = ret+mod
1748 movrnz $acc4,$t1,$acc1
1750 movrnz $acc4,$t2,$acc2
1752 movrnz $acc4,$t3,$acc3
1756 .type __ecp_nistz256_sub_morf_vis3,#function
1757 .size __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1760 __ecp_nistz256_div_by_2_vis3:
1761 ! ret = (a is odd ? a+mod : a) >> 1
1766 addcc $acc0,-1,$t0 ! add modulus
1767 addxccc $acc1,$t1,$t1
1768 addxccc $acc2,%g0,$t2
1769 addxccc $acc3,$t3,$t3
1770 addxc %g0,%g0,$acc4 ! carry bit
1772 movrnz $acc5,$t0,$acc0
1773 movrnz $acc5,$t1,$acc1
1774 movrnz $acc5,$t2,$acc2
1775 movrnz $acc5,$t3,$acc3
1776 movrz $acc5,%g0,$acc4
1791 sllx $acc4,63,$t3 ! don't forget carry bit
1797 .type __ecp_nistz256_div_by_2_vis3,#function
1798 .size __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1800 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1801 ! 4x faster [on T4]...
1803 __ecp_nistz256_mul_mont_vis3:
1805 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1813 ldx [$bp+8],$bi ! b[1]
1815 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1817 addxccc $acc2,$t1,$acc2
1819 addxccc $acc3,$t2,$acc3
1823 for($i=1;$i<4;$i++) {
1824 # Reduction iteration is normally performed by accumulating
1825 # result of multiplication of modulus by "magic" digit [and
1826 # omitting least significant word, which is guaranteed to
1827 # be 0], but thanks to special form of modulus and "magic"
1828 # digit being equal to least significant word, it can be
1829 # performed with additions and subtractions alone. Indeed:
1831 #	  ffff0001.00000000.0000ffff.ffffffff
1832 #	* abcdefgh
1833 #	+ xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1835 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
1836 # rewrite the above as:
1838 #	  xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1839 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1840 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1842 # or marking redundant operations:
1844 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1845 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1846 # - 0000abcd.efgh0000.--------.--------.--------
1847 # ^^^^^^^^ but this word is calculated with umulxhi, because
1848 # there is no subtract with 64-bit borrow:-(
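# In the code this shows up as computing the low 64 bits of the product
# as acc0 minus acc0<<32 (the constant is 2^64-2^32+1, so the 2^64 term
# drops out modulo 2^64), while umulxhi supplies the high 64 bits that a
# borrow chain cannot.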
1851 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1852 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1853 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1855 addxccc $acc2,$t1,$acc1
1857 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1859 addxccc $acc4,$t3,$acc3
1861 addxc $acc5,%g0,$acc4
1863 addcc $acc0,$t0,$acc0 ! accumulate low parts of multiplication
1865 addxccc $acc1,$t1,$acc1
1867 addxccc $acc2,$t2,$acc2
1869 addxccc $acc3,$t3,$acc3
1871 addxc $acc4,%g0,$acc4
1873 $code.=<<___ if ($i<3);
1874 ldx [$bp+8*($i+1)],$bi ! bp[$i+1]
1877 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1879 addxccc $acc2,$t1,$acc2
1881 addxccc $acc3,$t2,$acc3
1882 addxccc $acc4,$t3,$acc4
1887 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1888 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1889 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1890 addxccc $acc2,$t1,$acc1
1891 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1892 addxccc $acc4,$t3,$acc3
1893 b .Lmul_final_vis3 ! see below
1894 addxc $acc5,%g0,$acc4
1895 .type __ecp_nistz256_mul_mont_vis3,#function
1896 .size __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1898 ! compared to __ecp_nistz256_mul_mont_vis3 above it uses 21% fewer
1899 ! instructions, but is only 14% faster [on T4]...
1901 __ecp_nistz256_sqr_mont_vis3:
1902 ! | | | | | |a1*a0| |
1903 ! | | | | |a2*a0| | |
1904 ! | |a3*a2|a3*a0| | | |
1905 ! | | | |a2*a1| | | |
1906 ! | | |a3*a1| | | | |
1907 ! *| | | | | | | | 2|
1908 ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1909 ! |--+--+--+--+--+--+--+--|
1910 ! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1912 ! "can't overflow" below mark carrying into high part of
1913 ! multiplication result, which can't overflow, because it
1914 ! can never be all ones.
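! (the high half of a 64x64-bit product is at most 0xFFFFFFFFFFFFFFFE,
! since (2^64-1)^2 = 2^128 - 2^65 + 1, so adding one carry bit into it
! can never wrap)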
1916 mulx $a1,$a0,$acc1 ! a[1]*a[0]
1918 mulx $a2,$a0,$acc2 ! a[2]*a[0]
1920 mulx $a3,$a0,$acc3 ! a[3]*a[0]
1921 umulxhi $a3,$a0,$acc4
1923 addcc $acc2,$t1,$acc2 ! accumulate high parts of multiplication
1924 mulx $a2,$a1,$t0 ! a[2]*a[1]
1926 addxccc $acc3,$t2,$acc3
1927 mulx $a3,$a1,$t2 ! a[3]*a[1]
1929 addxc $acc4,%g0,$acc4 ! can't overflow
1931 mulx $a3,$a2,$acc5 ! a[3]*a[2]
1932 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1933 umulxhi $a3,$a2,$acc6
1935 addcc $t2,$t1,$t1 ! accumulate high parts of multiplication
1936 mulx $a0,$a0,$acc0 ! a[0]*a[0]
1937 addxc $t3,%g0,$t2 ! can't overflow
1939 addcc $acc3,$t0,$acc3 ! accumulate low parts of multiplication
1941 addxccc $acc4,$t1,$acc4
1942 mulx $a1,$a1,$t1 ! a[1]*a[1]
1943 addxccc $acc5,$t2,$acc5
1945 addxc $acc6,%g0,$acc6 ! can't overflow
1947 addcc $acc1,$acc1,$acc1 ! acc[1-6]*=2
1948 mulx $a2,$a2,$t2 ! a[2]*a[2]
1949 addxccc $acc2,$acc2,$acc2
1951 addxccc $acc3,$acc3,$acc3
1952 mulx $a3,$a3,$t3 ! a[3]*a[3]
1953 addxccc $acc4,$acc4,$acc4
1955 addxccc $acc5,$acc5,$acc5
1956 addxccc $acc6,$acc6,$acc6
1959 addcc $acc1,$a0,$acc1 ! +a[i]*a[i]
1960 addxccc $acc2,$t1,$acc2
1961 addxccc $acc3,$a1,$acc3
1962 addxccc $acc4,$t2,$acc4
1964 addxccc $acc5,$a2,$acc5
1966 addxccc $acc6,$t3,$acc6
1967 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1968 addxc $acc7,$a3,$acc7
1970 for($i=0;$i<3;$i++) { # reductions, see commentary
1971 # in multiplication for details
1973 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1974 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1976 addxccc $acc2,$t1,$acc1
1978 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1979 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1980 addxc %g0,$t3,$acc3 ! can't overflow
1984 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1985 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1986 addxccc $acc2,$t1,$acc1
1987 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1988 addxc %g0,$t3,$acc3 ! can't overflow
1990 addcc $acc0,$acc4,$acc0 ! accumulate upper half
1991 addxccc $acc1,$acc5,$acc1
1992 addxccc $acc2,$acc6,$acc2
1993 addxccc $acc3,$acc7,$acc3
1998 ! Final step is "if result > mod, subtract mod", but as comparison
1999 ! means subtraction, we do the subtraction and then copy the outcome
2000 ! if it didn't borrow. But note that as we [have to] replace the
2001 ! subtraction with addition of the negated modulus, the carry/borrow
2002 ! logic is inverted: a carry out below means the subtraction would
2003 ! not have borrowed, hence the movcs moves.
2004 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
2005 not $poly3,$poly3 ! restore 0x00000000FFFFFFFE
2006 addxccc $acc1,$poly1,$t1
2007 addxccc $acc2,$minus1,$t2
2008 addxccc $acc3,$poly3,$t3
2009 addxccc $acc4,$minus1,%g0 ! did it carry?
2011 movcs %xcc,$t0,$acc0
2012 movcs %xcc,$t1,$acc1
2014 movcs %xcc,$t2,$acc2
2016 movcs %xcc,$t3,$acc3
2020 .type __ecp_nistz256_sqr_mont_vis3,#function
2021 .size __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2024 ########################################################################
2025 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2028 my ($res_x,$res_y,$res_z,
2030 $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2031 # above map() describes stack layout with 10 temporary
2032 # 256-bit vectors on top.
2036 ecp_nistz256_point_double_vis3:
2037 save %sp,-STACK64_FRAME-32*10,%sp
2040 .Ldouble_shortcut_vis3:
2043 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2044 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2046 ! convert input to uint64_t[4]
2057 ld [$ap+32],$acc0 ! in_y
2065 ld [$ap+32+16],$acc2
2069 ld [$ap+32+24],$acc3
2073 stx $a0,[%sp+LOCALS64+$in_x]
2075 stx $a1,[%sp+LOCALS64+$in_x+8]
2077 stx $a2,[%sp+LOCALS64+$in_x+16]
2079 stx $a3,[%sp+LOCALS64+$in_x+24]
2081 stx $acc0,[%sp+LOCALS64+$in_y]
2083 stx $acc1,[%sp+LOCALS64+$in_y+8]
2085 stx $acc2,[%sp+LOCALS64+$in_y+16]
2086 stx $acc3,[%sp+LOCALS64+$in_y+24]
2088 ld [$ap+64],$a0 ! in_z
2106 stx $a0,[%sp+LOCALS64+$in_z]
2108 stx $a1,[%sp+LOCALS64+$in_z+8]
2110 stx $a2,[%sp+LOCALS64+$in_z+16]
2111 stx $a3,[%sp+LOCALS64+$in_z+24]
2113 ! in_y is still in $acc0-$acc3
2114 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(S, in_y);
2115 add %sp,LOCALS64+$S,$rp
2117 ! in_z is still in $a0-$a3
2118 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Zsqr, in_z);
2119 add %sp,LOCALS64+$Zsqr,$rp
2121 mov $acc0,$a0 ! put Zsqr aside
2126 add %sp,LOCALS64+$in_x,$bp
2127 call __ecp_nistz256_add_vis3 ! p256_add(M, Zsqr, in_x);
2128 add %sp,LOCALS64+$M,$rp
2130 mov $a0,$acc0 ! restore Zsqr
2131 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2133 ldx [%sp+LOCALS64+$S+8],$a1
2135 ldx [%sp+LOCALS64+$S+16],$a2
2137 ldx [%sp+LOCALS64+$S+24],$a3
2139 add %sp,LOCALS64+$in_x,$bp
2140 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(Zsqr, in_x, Zsqr);
2141 add %sp,LOCALS64+$Zsqr,$rp
2143 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(S, S);
2144 add %sp,LOCALS64+$S,$rp
2146 ldx [%sp+LOCALS64+$in_z],$bi
2147 ldx [%sp+LOCALS64+$in_y],$a0
2148 ldx [%sp+LOCALS64+$in_y+8],$a1
2149 ldx [%sp+LOCALS64+$in_y+16],$a2
2150 ldx [%sp+LOCALS64+$in_y+24],$a3
2151 add %sp,LOCALS64+$in_z,$bp
2152 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(tmp0, in_z, in_y);
2153 add %sp,LOCALS64+$tmp0,$rp
2155 ldx [%sp+LOCALS64+$M],$bi ! forward load
2156 ldx [%sp+LOCALS64+$Zsqr],$a0
2157 ldx [%sp+LOCALS64+$Zsqr+8],$a1
2158 ldx [%sp+LOCALS64+$Zsqr+16],$a2
2159 ldx [%sp+LOCALS64+$Zsqr+24],$a3
2161 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(res_z, tmp0);
2162 add %sp,LOCALS64+$res_z,$rp
2164 add %sp,LOCALS64+$M,$bp
2165 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(M, M, Zsqr);
2166 add %sp,LOCALS64+$M,$rp
2168 mov $acc0,$a0 ! put aside M
2172 call __ecp_nistz256_mul_by_2_vis3
2173 add %sp,LOCALS64+$M,$rp
2174 mov $a0,$t0 ! copy M
2175 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2177 ldx [%sp+LOCALS64+$S+8],$a1
2179 ldx [%sp+LOCALS64+$S+16],$a2
2181 ldx [%sp+LOCALS64+$S+24],$a3
2182 call __ecp_nistz256_add_noload_vis3 ! p256_mul_by_3(M, M);
2183 add %sp,LOCALS64+$M,$rp
2185 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(tmp0, S);
2186 add %sp,LOCALS64+$tmp0,$rp
2188 ldx [%sp+LOCALS64+$S],$bi ! forward load
2189 ldx [%sp+LOCALS64+$in_x],$a0
2190 ldx [%sp+LOCALS64+$in_x+8],$a1
2191 ldx [%sp+LOCALS64+$in_x+16],$a2
2192 ldx [%sp+LOCALS64+$in_x+24],$a3
2194 call __ecp_nistz256_div_by_2_vis3 ! p256_div_by_2(res_y, tmp0);
2195 add %sp,LOCALS64+$res_y,$rp
2197 add %sp,LOCALS64+$S,$bp
2198 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, in_x);
2199 add %sp,LOCALS64+$S,$rp
2201 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2202 ldx [%sp+LOCALS64+$M+8],$a1
2203 ldx [%sp+LOCALS64+$M+16],$a2
2204 ldx [%sp+LOCALS64+$M+24],$a3
2206 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(tmp0, S);
2207 add %sp,LOCALS64+$tmp0,$rp
2209 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(res_x, M);
2210 add %sp,LOCALS64+$res_x,$rp
2212 add %sp,LOCALS64+$tmp0,$bp
2213 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, tmp0);
2214 add %sp,LOCALS64+$res_x,$rp
2216 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2217 ldx [%sp+LOCALS64+$M+8],$a1
2218 ldx [%sp+LOCALS64+$M+16],$a2
2219 ldx [%sp+LOCALS64+$M+24],$a3
2221 add %sp,LOCALS64+$S,$bp
2222 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(S, S, res_x);
2223 add %sp,LOCALS64+$S,$rp
2226 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, M);
2227 add %sp,LOCALS64+$S,$rp
2229 ldx [%sp+LOCALS64+$res_x],$a0 ! forward load
2230 ldx [%sp+LOCALS64+$res_x+8],$a1
2231 ldx [%sp+LOCALS64+$res_x+16],$a2
2232 ldx [%sp+LOCALS64+$res_x+24],$a3
2234 add %sp,LOCALS64+$res_y,$bp
2235 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, S, res_y);
2236 add %sp,LOCALS64+$res_y,$rp
2238 ! convert output to uint32_t[8]
2241 st $a0,[$rp_real] ! res_x
2246 st $t1,[$rp_real+12]
2247 st $a2,[$rp_real+16]
2248 st $t2,[$rp_real+20]
2249 st $a3,[$rp_real+24]
2250 st $t3,[$rp_real+28]
2252 ldx [%sp+LOCALS64+$res_z],$a0 ! forward load
2254 ldx [%sp+LOCALS64+$res_z+8],$a1
2256 ldx [%sp+LOCALS64+$res_z+16],$a2
2258 ldx [%sp+LOCALS64+$res_z+24],$a3
2260 st $acc0,[$rp_real+32] ! res_y
2261 st $t0, [$rp_real+32+4]
2262 st $acc1,[$rp_real+32+8]
2263 st $t1, [$rp_real+32+12]
2264 st $acc2,[$rp_real+32+16]
2265 st $t2, [$rp_real+32+20]
2266 st $acc3,[$rp_real+32+24]
2267 st $t3, [$rp_real+32+28]
2271 st $a0,[$rp_real+64] ! res_z
2273 st $t0,[$rp_real+64+4]
2275 st $a1,[$rp_real+64+8]
2276 st $t1,[$rp_real+64+12]
2277 st $a2,[$rp_real+64+16]
2278 st $t2,[$rp_real+64+20]
2279 st $a3,[$rp_real+64+24]
2280 st $t3,[$rp_real+64+28]
2284 .type ecp_nistz256_point_double_vis3,#function
2285 .size ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2288 ########################################################################
2289 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2290 # const P256_POINT *in2);
2292 my ($res_x,$res_y,$res_z,
2293 $in1_x,$in1_y,$in1_z,
2294 $in2_x,$in2_y,$in2_z,
2295 $H,$Hsqr,$R,$Rsqr,$Hcub,
2296 $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2297 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2299 # above map() describes stack layout with 18 temporary
2300 # 256-bit vectors on top. Then we reserve some space for
2301 # !in1infty, !in2infty and result of check for zero.
2304 .globl ecp_nistz256_point_add_vis3
2306 ecp_nistz256_point_add_vis3:
2307 save %sp,-STACK64_FRAME-32*18-32,%sp
2312 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2313 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2315 ! convert input to uint64_t[4]
2316 ld [$bp],$a0 ! in2_x
2326 ld [$bp+32],$acc0 ! in2_y
2334 ld [$bp+32+16],$acc2
2338 ld [$bp+32+24],$acc3
2342 stx $a0,[%sp+LOCALS64+$in2_x]
2344 stx $a1,[%sp+LOCALS64+$in2_x+8]
2346 stx $a2,[%sp+LOCALS64+$in2_x+16]
2348 stx $a3,[%sp+LOCALS64+$in2_x+24]
2350 stx $acc0,[%sp+LOCALS64+$in2_y]
2352 stx $acc1,[%sp+LOCALS64+$in2_y+8]
2354 stx $acc2,[%sp+LOCALS64+$in2_y+16]
2355 stx $acc3,[%sp+LOCALS64+$in2_y+24]
2357 ld [$bp+64],$acc0 ! in2_z
2361 ld [$bp+64+16],$acc2
2363 ld [$bp+64+24],$acc3
2367 ld [$ap],$a0 ! in1_x
2383 stx $acc0,[%sp+LOCALS64+$in2_z]
2385 stx $acc1,[%sp+LOCALS64+$in2_z+8]
2387 stx $acc2,[%sp+LOCALS64+$in2_z+16]
2388 stx $acc3,[%sp+LOCALS64+$in2_z+24]
2390 or $acc1,$acc0,$acc0
2391 or $acc3,$acc2,$acc2
2392 or $acc2,$acc0,$acc0
2393 movrnz $acc0,-1,$acc0 ! !in2infty
2394 stx $acc0,[%fp+STACK_BIAS-8]
2397 ld [$ap+32],$acc0 ! in1_y
2404 ld [$ap+32+16],$acc2
2406 ld [$ap+32+24],$acc3
2410 stx $a0,[%sp+LOCALS64+$in1_x]
2412 stx $a1,[%sp+LOCALS64+$in1_x+8]
2414 stx $a2,[%sp+LOCALS64+$in1_x+16]
2416 stx $a3,[%sp+LOCALS64+$in1_x+24]
2418 stx $acc0,[%sp+LOCALS64+$in1_y]
2420 stx $acc1,[%sp+LOCALS64+$in1_y+8]
2422 stx $acc2,[%sp+LOCALS64+$in1_y+16]
2423 stx $acc3,[%sp+LOCALS64+$in1_y+24]
2425 ldx [%sp+LOCALS64+$in2_z],$a0 ! forward load
2426 ldx [%sp+LOCALS64+$in2_z+8],$a1
2427 ldx [%sp+LOCALS64+$in2_z+16],$a2
2428 ldx [%sp+LOCALS64+$in2_z+24],$a3
2430 ld [$ap+64],$acc0 ! in1_z
2434 ld [$ap+64+16],$acc2
2436 ld [$ap+64+24],$acc3
2444 stx $acc0,[%sp+LOCALS64+$in1_z]
2446 stx $acc1,[%sp+LOCALS64+$in1_z+8]
2448 stx $acc2,[%sp+LOCALS64+$in1_z+16]
2449 stx $acc3,[%sp+LOCALS64+$in1_z+24]
2451 or $acc1,$acc0,$acc0
2452 or $acc3,$acc2,$acc2
2453 or $acc2,$acc0,$acc0
2454 movrnz $acc0,-1,$acc0 ! !in1infty
2455 stx $acc0,[%fp+STACK_BIAS-16]
2457 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z2sqr, in2_z);
2458 add %sp,LOCALS64+$Z2sqr,$rp
2460 ldx [%sp+LOCALS64+$in1_z],$a0
2461 ldx [%sp+LOCALS64+$in1_z+8],$a1
2462 ldx [%sp+LOCALS64+$in1_z+16],$a2
2463 ldx [%sp+LOCALS64+$in1_z+24],$a3
2464 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
2465 add %sp,LOCALS64+$Z1sqr,$rp
2467 ldx [%sp+LOCALS64+$Z2sqr],$bi
2468 ldx [%sp+LOCALS64+$in2_z],$a0
2469 ldx [%sp+LOCALS64+$in2_z+8],$a1
2470 ldx [%sp+LOCALS64+$in2_z+16],$a2
2471 ldx [%sp+LOCALS64+$in2_z+24],$a3
2472 add %sp,LOCALS64+$Z2sqr,$bp
2473 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, Z2sqr, in2_z);
2474 add %sp,LOCALS64+$S1,$rp
2476 ldx [%sp+LOCALS64+$Z1sqr],$bi
2477 ldx [%sp+LOCALS64+$in1_z],$a0
2478 ldx [%sp+LOCALS64+$in1_z+8],$a1
2479 ldx [%sp+LOCALS64+$in1_z+16],$a2
2480 ldx [%sp+LOCALS64+$in1_z+24],$a3
2481 add %sp,LOCALS64+$Z1sqr,$bp
2482 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
2483 add %sp,LOCALS64+$S2,$rp
2485 ldx [%sp+LOCALS64+$S1],$bi
2486 ldx [%sp+LOCALS64+$in1_y],$a0
2487 ldx [%sp+LOCALS64+$in1_y+8],$a1
2488 ldx [%sp+LOCALS64+$in1_y+16],$a2
2489 ldx [%sp+LOCALS64+$in1_y+24],$a3
2490 add %sp,LOCALS64+$S1,$bp
2491 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, S1, in1_y);
2492 add %sp,LOCALS64+$S1,$rp
2494 ldx [%sp+LOCALS64+$S2],$bi
2495 ldx [%sp+LOCALS64+$in2_y],$a0
2496 ldx [%sp+LOCALS64+$in2_y+8],$a1
2497 ldx [%sp+LOCALS64+$in2_y+16],$a2
2498 ldx [%sp+LOCALS64+$in2_y+24],$a3
2499 add %sp,LOCALS64+$S2,$bp
2500 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
2501 add %sp,LOCALS64+$S2,$rp
2503 ldx [%sp+LOCALS64+$Z2sqr],$bi ! forward load
2504 ldx [%sp+LOCALS64+$in1_x],$a0
2505 ldx [%sp+LOCALS64+$in1_x+8],$a1
2506 ldx [%sp+LOCALS64+$in1_x+16],$a2
2507 ldx [%sp+LOCALS64+$in1_x+24],$a3
2509 add %sp,LOCALS64+$S1,$bp
2510 call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, S1);
2511 add %sp,LOCALS64+$R,$rp
2513 or $acc1,$acc0,$acc0 ! see if result is zero
2514 or $acc3,$acc2,$acc2
2515 or $acc2,$acc0,$acc0
2516 stx $acc0,[%fp+STACK_BIAS-24]
2518 add %sp,LOCALS64+$Z2sqr,$bp
2519 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U1, in1_x, Z2sqr);
2520 add %sp,LOCALS64+$U1,$rp
2522 ldx [%sp+LOCALS64+$Z1sqr],$bi
2523 ldx [%sp+LOCALS64+$in2_x],$a0
2524 ldx [%sp+LOCALS64+$in2_x+8],$a1
2525 ldx [%sp+LOCALS64+$in2_x+16],$a2
2526 ldx [%sp+LOCALS64+$in2_x+24],$a3
2527 add %sp,LOCALS64+$Z1sqr,$bp
2528 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in2_x, Z1sqr);
2529 add %sp,LOCALS64+$U2,$rp
2531 ldx [%sp+LOCALS64+$R],$a0 ! forward load
2532 ldx [%sp+LOCALS64+$R+8],$a1
2533 ldx [%sp+LOCALS64+$R+16],$a2
2534 ldx [%sp+LOCALS64+$R+24],$a3
2536 add %sp,LOCALS64+$U1,$bp
2537 call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, U1);
2538 add %sp,LOCALS64+$H,$rp
2540 or $acc1,$acc0,$acc0 ! see if result is zero
2541 or $acc3,$acc2,$acc2
2542 orcc $acc2,$acc0,$acc0
2544 bne,pt %xcc,.Ladd_proceed_vis3 ! is_equal(U1,U2)?
2547 ldx [%fp+STACK_BIAS-8],$t0
2548 ldx [%fp+STACK_BIAS-16],$t1
2549 ldx [%fp+STACK_BIAS-24],$t2
2551 be,pt %xcc,.Ladd_proceed_vis3 ! (in1infty || in2infty)?
2554 be,a,pt %xcc,.Ldouble_shortcut_vis3 ! is_equal(S1,S2)?
2555 add %sp,32*(18-10)+32,%sp ! difference in frame sizes
2560 st %g0,[$rp_real+12]
2561 st %g0,[$rp_real+16]
2562 st %g0,[$rp_real+20]
2563 st %g0,[$rp_real+24]
2564 st %g0,[$rp_real+28]
2565 st %g0,[$rp_real+32]
2566 st %g0,[$rp_real+32+4]
2567 st %g0,[$rp_real+32+8]
2568 st %g0,[$rp_real+32+12]
2569 st %g0,[$rp_real+32+16]
2570 st %g0,[$rp_real+32+20]
2571 st %g0,[$rp_real+32+24]
2572 st %g0,[$rp_real+32+28]
2573 st %g0,[$rp_real+64]
2574 st %g0,[$rp_real+64+4]
2575 st %g0,[$rp_real+64+8]
2576 st %g0,[$rp_real+64+12]
2577 st %g0,[$rp_real+64+16]
2578 st %g0,[$rp_real+64+20]
2579 st %g0,[$rp_real+64+24]
2580 st %g0,[$rp_real+64+28]
2586 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2587 add %sp,LOCALS64+$Rsqr,$rp
2589 ldx [%sp+LOCALS64+$H],$bi
2590 ldx [%sp+LOCALS64+$in1_z],$a0
2591 ldx [%sp+LOCALS64+$in1_z+8],$a1
2592 ldx [%sp+LOCALS64+$in1_z+16],$a2
2593 ldx [%sp+LOCALS64+$in1_z+24],$a3
2594 add %sp,LOCALS64+$H,$bp
2595 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2596 add %sp,LOCALS64+$res_z,$rp
2598 ldx [%sp+LOCALS64+$H],$a0
2599 ldx [%sp+LOCALS64+$H+8],$a1
2600 ldx [%sp+LOCALS64+$H+16],$a2
2601 ldx [%sp+LOCALS64+$H+24],$a3
2602 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2603 add %sp,LOCALS64+$Hsqr,$rp
2605 ldx [%sp+LOCALS64+$res_z],$bi
2606 ldx [%sp+LOCALS64+$in2_z],$a0
2607 ldx [%sp+LOCALS64+$in2_z+8],$a1
2608 ldx [%sp+LOCALS64+$in2_z+16],$a2
2609 ldx [%sp+LOCALS64+$in2_z+24],$a3
2610 add %sp,LOCALS64+$res_z,$bp
2611 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, res_z, in2_z);
2612 add %sp,LOCALS64+$res_z,$rp
2614 ldx [%sp+LOCALS64+$H],$bi
2615 ldx [%sp+LOCALS64+$Hsqr],$a0
2616 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2617 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2618 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2619 add %sp,LOCALS64+$H,$bp
2620 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2621 add %sp,LOCALS64+$Hcub,$rp
2623 ldx [%sp+LOCALS64+$U1],$bi
2624 ldx [%sp+LOCALS64+$Hsqr],$a0
2625 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2626 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2627 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2628 add %sp,LOCALS64+$U1,$bp
2629 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, U1, Hsqr);
2630 add %sp,LOCALS64+$U2,$rp
2632 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2633 add %sp,LOCALS64+$Hsqr,$rp
2635 add %sp,LOCALS64+$Rsqr,$bp
2636 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2637 add %sp,LOCALS64+$res_x,$rp
2639 add %sp,LOCALS64+$Hcub,$bp
2640 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2641 add %sp,LOCALS64+$res_x,$rp
2643 ldx [%sp+LOCALS64+$S1],$bi ! forward load
2644 ldx [%sp+LOCALS64+$Hcub],$a0
2645 ldx [%sp+LOCALS64+$Hcub+8],$a1
2646 ldx [%sp+LOCALS64+$Hcub+16],$a2
2647 ldx [%sp+LOCALS64+$Hcub+24],$a3
2649 add %sp,LOCALS64+$U2,$bp
2650 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
2651 add %sp,LOCALS64+$res_y,$rp
2653 add %sp,LOCALS64+$S1,$bp
2654 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S1, Hcub);
2655 add %sp,LOCALS64+$S2,$rp
2657 ldx [%sp+LOCALS64+$R],$bi
2658 ldx [%sp+LOCALS64+$res_y],$a0
2659 ldx [%sp+LOCALS64+$res_y+8],$a1
2660 ldx [%sp+LOCALS64+$res_y+16],$a2
2661 ldx [%sp+LOCALS64+$res_y+24],$a3
2662 add %sp,LOCALS64+$R,$bp
2663 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
2664 add %sp,LOCALS64+$res_y,$rp
2666 add %sp,LOCALS64+$S2,$bp
2667 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
2668 add %sp,LOCALS64+$res_y,$rp
2670 ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
2671 ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
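! select the result: if in1 was the point at infinity (!in1infty==0) the
! sum is in2, if in2 was at infinity the sum is in1, otherwise it is the
! freshly computed res_x/res_y/res_z; the movrz pairs below implement this
! as branchless conditional moves keyed on the two 64-bit flags.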
2673 for($i=0;$i<96;$i+=16) { # conditional moves
2675 ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
2676 ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
2677 ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
2678 ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
2679 ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
2680 ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
2681 movrz $t1,$acc2,$acc0
2682 movrz $t1,$acc3,$acc1
2683 movrz $t2,$acc4,$acc0
2684 movrz $t2,$acc5,$acc1
2685 srlx $acc0,32,$acc2 ! split 64-bit limbs back into 32-bit words
2686 srlx $acc1,32,$acc3
2687 st $acc0,[$rp_real+$i]
2688 st $acc2,[$rp_real+$i+4]
2689 st $acc1,[$rp_real+$i+8]
2690 st $acc3,[$rp_real+$i+12]
2697 .type ecp_nistz256_point_add_vis3,#function
2698 .size ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
2701 ########################################################################
2702 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
2703 # const P256_POINT_AFFINE *in2);
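# Since in2 is affine (implicit Z2==1), U1==in1_x and S1==in1_y, so the
# U1/S1 multiplications of the general routine are skipped and only
# U2=X2*Z1^2 and S2=Y2*Z1^3 need to be computed below.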
2705 my ($res_x,$res_y,$res_z,
2706 $in1_x,$in1_y,$in1_z,
2707 $in2_x,$in2_y,
2708 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
2709 my $Z1sqr = $S2;
2710 # above map() describes stack layout with 15 temporary
2711 # 256-bit vectors on top. Then we reserve some space for
2712 # !in1infty and !in2infty.
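# For orientation (a sketch, not emitted into the assembly): each name above
# is a byte offset from LOCALS64, i.e.
#
#   my @layout = qw(res_x res_y res_z in1_x in1_y in1_z in2_x in2_y
#                   U2 S2 H R Hsqr Hcub Rsqr);
#   printf("%-6s LOCALS64+%3d\n", $layout[$_], 32*$_) for (0..14);
#
# so $res_x lives at LOCALS64+0 and $Rsqr at LOCALS64+448, while the extra
# 32 bytes reserved by the save below hold the two 8-byte !in1infty/!in2infty
# flags addressed as [%fp+STACK_BIAS-16] and [%fp+STACK_BIAS-8].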
2716 ecp_nistz256_point_add_affine_vis3:
2717 save %sp,-STACK64_FRAME-32*15-32,%sp
2722 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2723 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2725 ! convert input to uint64_t[4]
2726 ld [$bp],$a0 ! in2_x
2736 ld [$bp+32],$acc0 ! in2_y
2744 ld [$bp+32+16],$acc2
2748 ld [$bp+32+24],$acc3
2752 stx $a0,[%sp+LOCALS64+$in2_x]
2754 stx $a1,[%sp+LOCALS64+$in2_x+8]
2756 stx $a2,[%sp+LOCALS64+$in2_x+16]
2758 stx $a3,[%sp+LOCALS64+$in2_x+24]
2760 stx $acc0,[%sp+LOCALS64+$in2_y]
2762 stx $acc1,[%sp+LOCALS64+$in2_y+8]
2764 stx $acc2,[%sp+LOCALS64+$in2_y+16]
2765 stx $acc3,[%sp+LOCALS64+$in2_y+24]
2769 or $acc1,$acc0,$acc0
2770 or $acc3,$acc2,$acc2
2772 or $acc2,$acc0,$acc0
2774 movrnz $a0,-1,$a0 ! !in2infty
2775 stx $a0,[%fp+STACK_BIAS-8]
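! $a0 is now an all-ones mask if in2 is a finite point and zero if its
! coordinates are all zero (this module's encoding of the point at
! infinity); the movrz selects at the end key off this flag and the
! matching !in1infty flag.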
2777 ld [$ap],$a0 ! in1_x
2787 ld [$ap+32],$acc0 ! in1_y
2795 ld [$ap+32+16],$acc2
2799 ld [$ap+32+24],$acc3
2803 stx $a0,[%sp+LOCALS64+$in1_x]
2805 stx $a1,[%sp+LOCALS64+$in1_x+8]
2807 stx $a2,[%sp+LOCALS64+$in1_x+16]
2809 stx $a3,[%sp+LOCALS64+$in1_x+24]
2811 stx $acc0,[%sp+LOCALS64+$in1_y]
2813 stx $acc1,[%sp+LOCALS64+$in1_y+8]
2815 stx $acc2,[%sp+LOCALS64+$in1_y+16]
2816 stx $acc3,[%sp+LOCALS64+$in1_y+24]
2818 ld [$ap+64],$a0 ! in1_z
2832 stx $a0,[%sp+LOCALS64+$in1_z]
2834 stx $a1,[%sp+LOCALS64+$in1_z+8]
2836 stx $a2,[%sp+LOCALS64+$in1_z+16]
2837 stx $a3,[%sp+LOCALS64+$in1_z+24]
2842 movrnz $t0,-1,$t0 ! !in1infty
2843 stx $t0,[%fp+STACK_BIAS-16]
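! What follows is the standard mixed Jacobian+affine addition, cf. the
! per-call comments:
!   U2 = X2*Z1^2,   H = U2 - X1
!   S2 = Y2*Z1^3,   R = S2 - Y1
!   res_z = H*Z1
!   res_x = R^2 - H^3 - 2*X1*H^2
!   res_y = R*(X1*H^2 - res_x) - Y1*H^3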
2845 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
2846 add %sp,LOCALS64+$Z1sqr,$rp
2848 ldx [%sp+LOCALS64+$in2_x],$bi
2853 add %sp,LOCALS64+$in2_x,$bp
2854 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, Z1sqr, in2_x);
2855 add %sp,LOCALS64+$U2,$rp
2857 ldx [%sp+LOCALS64+$Z1sqr],$bi ! forward load
2858 ldx [%sp+LOCALS64+$in1_z],$a0
2859 ldx [%sp+LOCALS64+$in1_z+8],$a1
2860 ldx [%sp+LOCALS64+$in1_z+16],$a2
2861 ldx [%sp+LOCALS64+$in1_z+24],$a3
2863 add %sp,LOCALS64+$in1_x,$bp
2864 call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, in1_x);
2865 add %sp,LOCALS64+$H,$rp
2867 add %sp,LOCALS64+$Z1sqr,$bp
2868 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
2869 add %sp,LOCALS64+$S2,$rp
2871 ldx [%sp+LOCALS64+$H],$bi
2872 ldx [%sp+LOCALS64+$in1_z],$a0
2873 ldx [%sp+LOCALS64+$in1_z+8],$a1
2874 ldx [%sp+LOCALS64+$in1_z+16],$a2
2875 ldx [%sp+LOCALS64+$in1_z+24],$a3
2876 add %sp,LOCALS64+$H,$bp
2877 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2878 add %sp,LOCALS64+$res_z,$rp
2880 ldx [%sp+LOCALS64+$S2],$bi
2881 ldx [%sp+LOCALS64+$in2_y],$a0
2882 ldx [%sp+LOCALS64+$in2_y+8],$a1
2883 ldx [%sp+LOCALS64+$in2_y+16],$a2
2884 ldx [%sp+LOCALS64+$in2_y+24],$a3
2885 add %sp,LOCALS64+$S2,$bp
2886 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
2887 add %sp,LOCALS64+$S2,$rp
2889 ldx [%sp+LOCALS64+$H],$a0 ! forward load
2890 ldx [%sp+LOCALS64+$H+8],$a1
2891 ldx [%sp+LOCALS64+$H+16],$a2
2892 ldx [%sp+LOCALS64+$H+24],$a3
2894 add %sp,LOCALS64+$in1_y,$bp
2895 call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, in1_y);
2896 add %sp,LOCALS64+$R,$rp
2898 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2899 add %sp,LOCALS64+$Hsqr,$rp
2901 ldx [%sp+LOCALS64+$R],$a0
2902 ldx [%sp+LOCALS64+$R+8],$a1
2903 ldx [%sp+LOCALS64+$R+16],$a2
2904 ldx [%sp+LOCALS64+$R+24],$a3
2905 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2906 add %sp,LOCALS64+$Rsqr,$rp
2908 ldx [%sp+LOCALS64+$H],$bi
2909 ldx [%sp+LOCALS64+$Hsqr],$a0
2910 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2911 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2912 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2913 add %sp,LOCALS64+$H,$bp
2914 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2915 add %sp,LOCALS64+$Hcub,$rp
2917 ldx [%sp+LOCALS64+$Hsqr],$bi
2918 ldx [%sp+LOCALS64+$in1_x],$a0
2919 ldx [%sp+LOCALS64+$in1_x+8],$a1
2920 ldx [%sp+LOCALS64+$in1_x+16],$a2
2921 ldx [%sp+LOCALS64+$in1_x+24],$a3
2922 add %sp,LOCALS64+$Hsqr,$bp
2923 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in1_x, Hsqr);
2924 add %sp,LOCALS64+$U2,$rp
2926 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2927 add %sp,LOCALS64+$Hsqr,$rp
2929 add %sp,LOCALS64+$Rsqr,$bp
2930 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2931 add %sp,LOCALS64+$res_x,$rp
2933 add %sp,LOCALS64+$Hcub,$bp
2934 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2935 add %sp,LOCALS64+$res_x,$rp
2937 ldx [%sp+LOCALS64+$Hcub],$bi ! forward load
2938 ldx [%sp+LOCALS64+$in1_y],$a0
2939 ldx [%sp+LOCALS64+$in1_y+8],$a1
2940 ldx [%sp+LOCALS64+$in1_y+16],$a2
2941 ldx [%sp+LOCALS64+$in1_y+24],$a3
2943 add %sp,LOCALS64+$U2,$bp
2944 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
2945 add %sp,LOCALS64+$res_y,$rp
2947 add %sp,LOCALS64+$Hcub,$bp
2948 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, in1_y, Hcub);
2949 add %sp,LOCALS64+$S2,$rp
2951 ldx [%sp+LOCALS64+$R],$bi
2952 ldx [%sp+LOCALS64+$res_y],$a0
2953 ldx [%sp+LOCALS64+$res_y+8],$a1
2954 ldx [%sp+LOCALS64+$res_y+16],$a2
2955 ldx [%sp+LOCALS64+$res_y+24],$a3
2956 add %sp,LOCALS64+$R,$bp
2957 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
2958 add %sp,LOCALS64+$res_y,$rp
2960 add %sp,LOCALS64+$S2,$bp
2961 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
2962 add %sp,LOCALS64+$res_y,$rp
2964 ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
2965 ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
2966 1: call .+8
2967 add %o7,.Lone_mont_vis3-1b,$bp
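! for the z coordinate the "in2" operand is taken from .Lone_mont_vis3
! (1 in the Montgomery domain), since the affine in2 has an implicit Z2==1;
! thus if in1 was at infinity the result becomes (in2_x,in2_y,1), and if
! in2 was at infinity the result is in1 unchanged.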
2969 for($i=0;$i<64;$i+=16) { # conditional moves
2971 ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
2972 ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
2973 ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
2974 ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
2975 ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
2976 ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
2977 movrz $t1,$acc2,$acc0
2978 movrz $t1,$acc3,$acc1
2979 movrz $t2,$acc4,$acc0
2980 movrz $t2,$acc5,$acc1
2981 srlx $acc0,32,$acc2 ! split 64-bit limbs back into 32-bit words
2982 srlx $acc1,32,$acc3
2983 st $acc0,[$rp_real+$i]
2984 st $acc2,[$rp_real+$i+4]
2985 st $acc1,[$rp_real+$i+8]
2986 st $acc3,[$rp_real+$i+12]
2989 for(;$i<96;$i+=16) {
2991 ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
2992 ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
2993 ldx [$bp+$i-64],$acc2 ! "in2"
2994 ldx [$bp+$i-64+8],$acc3
2995 ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
2996 ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
2997 movrz $t1,$acc2,$acc0
2998 movrz $t1,$acc3,$acc1
2999 movrz $t2,$acc4,$acc0
3000 movrz $t2,$acc5,$acc1
3001 srlx $acc0,32,$acc2 ! split 64-bit limbs back into 32-bit words
3002 srlx $acc1,32,$acc3
3003 st $acc0,[$rp_real+$i]
3004 st $acc2,[$rp_real+$i+4]
3005 st $acc1,[$rp_real+$i+8]
3006 st $acc3,[$rp_real+$i+12]
3012 .type ecp_nistz256_point_add_affine_vis3,#function
3013 .size ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
3014 .align 64
3015 .Lone_mont_vis3: ! 1 converted to Montgomery domain, i.e. 2^256 mod P
3016 .long 0x00000000,0x00000001, 0xffffffff,0x00000000
3017 .long 0xffffffff,0xffffffff, 0x00000000,0xfffffffe
3022 # The purpose of these subroutines is to explicitly encode VIS3 instructions,
3023 # so that the module can be compiled without specifying VIS extensions on the
3024 # compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. The idea is to keep
3025 # open the option of producing a "universal" binary and letting the program
3026 # detect at run-time whether the current CPU is VIS3-capable.
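# For example, given the encoding formula in unvis3() below, an instruction
# such as "addxc %o0,%o1,%o2" (opf 0x011, %o0->8, %o1->9, %o2->10) is
# rewritten as
#
#	.word	0x95b20229 !addxc	%o0,%o1,%o2
#
# i.e. 0x81b00000|10<<25|8<<14|0x011<<5|9.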
3027 sub unvis3 {
3028 my ($mnemonic,$rs1,$rs2,$rd)=@_;
3029 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
3030 my ($ref,$opf);
3031 my %visopf = ( "addxc" => 0x011,
3032 "addxccc" => 0x013,
3033 "umulxhi" => 0x016 );
3035 $ref = "$mnemonic\t$rs1,$rs2,$rd";
3037 if ($opf=$visopf{$mnemonic}) {
3038 foreach ($rs1,$rs2,$rd) {
3039 return $ref if (!/%([goli])([0-9])/);
3040 $_=$bias{$1}+$2; # numeric register index, e.g. %o3 -> 8+3
3041 }
3043 return sprintf ".word\t0x%08x !%s",
3044 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
3045 $ref;
3047 } else {
3048 return $ref;
3049 }
3050 }
3051 foreach (split("\n",$code)) {
3052 s/\`([^\`]*)\`/eval $1/ge;
3054 s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
3055 &unvis3($1,$2,$3,$4)
3056 /ge;